Dataset schema (column · dtype · observed range):

    code                     string   lengths 87 – 55.2k
    code_codestyle           int64    0 – 349
    style_context            string   lengths 135 – 49.1k
    style_context_codestyle  int64    0 – 349
    label                    int64    0 – 1
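Before the rows themselves, a minimal sketch of how a dump like this could be loaded and inspected; the file name is hypothetical and pandas is assumed to be available:

    # Hypothetical file name -- substitute the real dataset file.
    import pandas as pd

    df = pd.read_parquet("code_style_pairs.parquet")
    print(df.dtypes)                   # columns as in the schema above
    print(df["label"].value_counts())  # binary label, 0 or 1

    row = df.iloc[0]
    print(row["code"][:200])           # first sample's code column
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])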
"""simple docstring""" import fcntl import os import socket import torch import torch.distributed as dist def a__ ( *_SCREAMING_SNAKE_CASE ): """simple docstring""" with open(_SCREAMING_SNAKE_CASE , "r" ) as fh: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*_SCREAMING_SNAKE_CASE ) finally: fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) lowerCAmelCase__ = int(os.environ['''LOCAL_RANK''']) torch.cuda.set_device(local_rank) lowerCAmelCase__ = torch.device('''cuda''', local_rank) lowerCAmelCase__ = socket.gethostname() lowerCAmelCase__ = f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group('''nccl''') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank lowerCAmelCase__ = dist.get_rank() lowerCAmelCase__ = dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
code_codestyle: 153
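The distributed test above is meant to be launched with one process per GPU; a typical invocation (the file name here is an assumption) looks like:

    python -m torch.distributed.run --nproc_per_node 2 torch-distributed-gpu-test.py

The launcher (torchrun / torch.distributed.run) sets the LOCAL_RANK environment variable the script reads.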
"""simple docstring""" class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ): A = name A = val def __str__( self :str ): return f"{self.__class__.__name__}({self.name}, {self.val})" def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ): return self.val < other.val class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ): A = {} A = {} A = self.build_heap(__UpperCamelCase ) def __getitem__( self :int , __UpperCamelCase :Optional[int] ): return self.get_value(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ): return (idx - 1) // 2 def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): return idx * 2 + 1 def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ): return idx * 2 + 2 def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ): return self.heap_dict[key] def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): A = len(__UpperCamelCase ) - 1 A = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): A = idx A = i.val for i in range(__UpperCamelCase , -1 , -1 ): self.sift_down(__UpperCamelCase , __UpperCamelCase ) return array def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ): while True: A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 A = self.get_right_child_idx(__UpperCamelCase ) A = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: A = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: A = r if smallest != idx: A, A = array[smallest], array[idx] ( ( A ), ( A ), ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) A = smallest else: break def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ): A = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: A, A = self.heap[idx], self.heap[p] A, A = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) A = p A = self.get_parent_idx(__UpperCamelCase ) def lowerCamelCase ( self :Any ): return self.heap[0] def lowerCamelCase ( self :Tuple ): A, A = self.heap[-1], self.heap[0] A, A = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) A = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ): self.heap.append(__UpperCamelCase ) A = len(self.heap ) - 1 A = node.val self.sift_up(len(self.heap ) - 1 ) def lowerCamelCase ( self :Tuple ): return len(self.heap ) == 0 def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" A = new_value A = new_value self.sift_up(self.idx_of_element[node] ) _snake_case : Optional[int] = Node('R', -1) _snake_case : Tuple = Node('B', 6) _snake_case : Tuple = Node('A', 3) _snake_case : Optional[int] = Node('X', 1) _snake_case : List[Any] = Node('E', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array _snake_case : Tuple = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('Min Heap - before decrease key') for i in my_min_heap.heap: print(i) print('Min Heap - After decrease key of node [B -> 
-17]') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 292
label: 0
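A short usage sketch for the remaining operations of the heap sample above (peek, remove, insert, is_empty are the de-mangled names used in the cleanup):

    print(my_min_heap.peek())        # Node(B, -17) after the decrease_key call
    smallest = my_min_heap.remove()  # pops the minimum and restores the heap property
    my_min_heap.insert(Node("Z", 0))
    print(my_min_heap.is_empty())    # False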
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case :Dict = logging.get_logger(__name__) __snake_case :str = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class _A ( lowercase_ ): UpperCamelCase__ : Union[str, Any] = '''deta''' UpperCamelCase__ : int = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=900 , __SCREAMING_SNAKE_CASE : Optional[Any]=2_048 , __SCREAMING_SNAKE_CASE : List[Any]=6 , __SCREAMING_SNAKE_CASE : Tuple=2_048 , __SCREAMING_SNAKE_CASE : str=8 , __SCREAMING_SNAKE_CASE : List[Any]=6 , __SCREAMING_SNAKE_CASE : Dict=1_024 , __SCREAMING_SNAKE_CASE : str=8 , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=256 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]="sine" , __SCREAMING_SNAKE_CASE : Union[str, Any]=5 , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[Any]=300 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[str]=5 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=5 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : int=0.25 , **__SCREAMING_SNAKE_CASE : List[str] , ): '''simple docstring''' if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''') __a = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4''']) else: if isinstance(__UpperCamelCase , __UpperCamelCase): __a = backbone_config.pop('''model_type''') __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(__UpperCamelCase) __a = backbone_config __a = num_queries __a = max_position_embeddings __a = d_model __a = encoder_ffn_dim __a = encoder_layers __a = encoder_attention_heads __a = decoder_ffn_dim __a = decoder_layers __a = decoder_attention_heads __a = dropout __a = attention_dropout __a = activation_dropout __a = activation_function __a = init_std __a = init_xavier_std __a = encoder_layerdrop __a = auxiliary_loss __a = position_embedding_type # deformable attributes __a = num_feature_levels __a = encoder_n_points __a = decoder_n_points __a = two_stage __a = two_stage_num_proposals __a = with_box_refine __a = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''') # Hungarian matcher __a = class_cost __a = bbox_cost __a = giou_cost # Loss coefficients __a = mask_loss_coefficient __a = dice_loss_coefficient __a = bbox_loss_coefficient __a = giou_loss_coefficient __a = eos_coefficient __a = focal_alpha super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase) @property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return self.encoder_attention_heads @property def _lowerCamelCase ( self : str): '''simple docstring''' return self.d_model def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = copy.deepcopy(self.__dict__) __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
code_codestyle: 49
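The preceding sample is the DETA model configuration from transformers. A small sketch of the attribute_map behavior it defines, assuming a transformers version that ships DetaConfig:

    from transformers import DetaConfig

    config = DetaConfig()
    # attribute_map routes the generic names onto the DETA-specific ones:
    assert config.hidden_size == config.d_model
    assert config.num_attention_heads == config.encoder_attention_heads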
"""simple docstring""" from __future__ import annotations _snake_case : str = [] def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): for i in range(len(UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def A__ ( UpperCamelCase , UpperCamelCase ): if row >= len(UpperCamelCase ): solution.append(UpperCamelCase ) printboard(UpperCamelCase ) print() return True for i in range(len(UpperCamelCase ) ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = 1 solve(UpperCamelCase , row + 1 ) A = 0 return False def A__ ( UpperCamelCase ): for i in range(len(UpperCamelCase ) ): for j in range(len(UpperCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) _snake_case : List[str] = 8 _snake_case : List[str] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
style_context_codestyle: 292
label: 0
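A quick sanity check for the solver above: the 4-queens board has exactly 2 solutions (and 8-queens has 92), so the final count can be asserted; the names are those of the cleaned-up script:

    n = 4
    solution.clear()
    board = [[0 for _ in range(n)] for _ in range(n)]
    solve(board, 0)
    assert len(solution) == 2  # the classic 4-queens result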
def binomial_coefficient(n, r):
    """Compute C(n, r) by building one row of Pascal's triangle in place."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
code_codestyle: 127
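The value can be cross-checked against the standard library (math.comb is available from Python 3.8):

    from math import comb

    assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252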
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ): pass def A__ ( UpperCamelCase ): A = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ): A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ): A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase ) import datasets A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) A = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , __UpperCamelCase , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def lowerCamelCase ( self :Optional[Any] ): pass @slow @require_torch def lowerCamelCase ( self :Optional[Any] ): A = "Intel/dpt-large" A = pipeline("depth-estimation" , model=__UpperCamelCase ) A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) A = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def lowerCamelCase ( self :Optional[Any] ): # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
style_context_codestyle: 292
label: 0
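The sample above is the transformers depth-estimation pipeline test; note that its hashimage helper calls `hashlib.mda`, a mangling of `hashlib.md5`. A standalone sketch of the pipeline the test exercises, using only names that appear in the test itself:

    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(out["predicted_depth"].shape)  # torch.Tensor of per-pixel depth
    out["depth"].save("depth.png")       # PIL image visualization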
"""simple docstring""" from math import isqrt, loga def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> List[Any]: _snake_case = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __lowerCamelCase , __lowerCamelCase ): _snake_case = False return [i for i in range(2 , __lowerCamelCase ) if is_prime[i]] def _UpperCAmelCase ( __lowerCamelCase : Tuple = 80_08_00 , __lowerCamelCase : Dict = 80_08_00 ) -> Optional[int]: _snake_case = degree * loga(__lowerCamelCase ) _snake_case = int(__lowerCamelCase ) _snake_case = calculate_prime_numbers(__lowerCamelCase ) _snake_case = 0 _snake_case = 0 _snake_case = len(__lowerCamelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"{solution() = }")
code_codestyle: 288
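Why the log2 comparison in solution works: a hybrid integer C(p, q) = p^q * q^p satisfies C(p, q) <= N exactly when q*log2(p) + p*log2(q) <= log2(N). A tiny check with the smallest primes:

    from math import log2

    p, q = 2, 3
    c = p**q * q**p  # 2**3 * 3**2 = 72
    assert (q * log2(p) + p * log2(q) <= log2(100)) == (c <= 100)  # both True
    assert (q * log2(p) + p * log2(q) <= log2(50)) == (c <= 50)    # both False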
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : UpperCamelCase = PegasusConfig UpperCamelCase = {} UpperCamelCase = '''gelu''' def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ): A = parent A = batch_size A = seq_length A = is_training A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = eos_token_id A = pad_token_id A = bos_token_id def lowerCamelCase ( self :Tuple ): A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A = tf.concat([input_ids, eos_tensor] , axis=1 ) A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return config, inputs_dict def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ): A = TFPegasusModel(config=__UpperCamelCase ).get_decoder() A = inputs_dict["input_ids"] A = input_ids[:1, :] A = inputs_dict["attention_mask"][:1, :] A = inputs_dict["head_mask"] A = 1 # first forward pass A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) A, A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A = tf.concat([input_ids, next_tokens] , axis=-1 ) A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0] A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , 
past_key_values=__UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A = output_from_no_past[:, -3:, random_slice_idx] A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ): if attention_mask is None: A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :int ): A = TFPegasusModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase ) def lowerCamelCase ( self :Dict ): self.config_tester.run_common_tests() def lowerCamelCase ( self :Any ): A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] UpperCamelCase = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase = '''google/pegasus-xsum''' @cached_property def lowerCamelCase ( self :Any ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase ( self :Dict ): A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCamelCase ( self :str , **__UpperCamelCase :str ): A = self.translate_src_text(**__UpperCamelCase ) assert self.expected_text == generated_words def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ): A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" ) A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , ) A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase ) return generated_words @slow def lowerCamelCase ( self :Union[str, Any] ): self._assert_generated_batch_equal_expected()
style_context_codestyle: 292
label: 0
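In the TF Pegasus test above, `TFAutoModelForSeqaSeqLM` is a mangled `TFAutoModelForSeq2SeqLM`. A hedged standalone version of its summarization check (model name and generate arguments are taken from the test):

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(["PG&E stated it scheduled the blackouts ..."],
                      return_tensors="tf", padding=True)
    ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True))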
"""Scrape an Amazon search-results page into a pandas DataFrame."""
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup  # "bsa" in the mangled source is bs4
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n"
        " (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text  # "item.ha" in the mangled source is item.h2
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # NOTE: the identifier mangling destroyed the targets of two further
        # assignments of " " here; they most plausibly blanked empty price fields.
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
code_codestyle: 182
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def A__ ( UpperCamelCase = "laptop" ): A = F"https://www.amazon.in/laptop/s?k={product}" A = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36", "Accept-Language": "en-US, en;q=0.5", } A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text ) # Initialize a Pandas dataframe with the column titles A = DataFrame( columns=[ "Product Title", "Product Link", "Current Price of the product", "Product Rating", "MRP of the product", "Discount", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ): try: A = item.ha.text A = "https://www.amazon.in/" + item.ha.a["href"] A = item.find("span" , attrs={"class": "a-offscreen"} ).text try: A = item.find("span" , attrs={"class": "a-icon-alt"} ).text except AttributeError: A = "Not available" try: A = ( "₹" + item.find( "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1] ) except AttributeError: A = "" try: A = float( ( ( float(product_mrp.strip("₹" ).replace("," , "" ) ) - float(product_price.strip("₹" ).replace("," , "" ) ) ) / float(product_mrp.strip("₹" ).replace("," , "" ) ) ) * 100 ) except ValueError: A = float("nan" ) except AttributeError: pass A = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] A = " " A = " " data_frame.index += 1 return data_frame if __name__ == "__main__": _snake_case : Optional[int] = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
style_context_codestyle: 292
label: 0
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : List[Any], lowerCamelCase : str, lowerCamelCase : Tuple=13, lowerCamelCase : List[str]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : Dict=True, lowerCamelCase : str=99, lowerCamelCase : Any=64, lowerCamelCase : int=5, lowerCamelCase : Dict=4, lowerCamelCase : Tuple=64, lowerCamelCase : Union[str, Any]="gelu", lowerCamelCase : List[str]=0.1, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : List[Any]=512, lowerCamelCase : List[Any]=16, lowerCamelCase : str=2, lowerCamelCase : str=0.02, lowerCamelCase : Dict=3, lowerCamelCase : Optional[int]=4, lowerCamelCase : Dict=None, ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope def lowercase__ ( self : str ): '''simple docstring''' return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def lowercase__ ( self : Dict ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size ) lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowercase__ = ids_tensor([self.batch_size], self.num_choices ) lowercase__ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Union[str, Any] ): '''simple docstring''' return MPNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def lowercase__ ( self : str, lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : List[str] ): '''simple docstring''' lowercase__ = MPNetModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) 
model.eval() lowercase__ = model(__UpperCamelCase, __UpperCamelCase ) lowercase__ = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def lowercase__ ( self : Optional[Any], lowerCamelCase : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : List[Any] ): '''simple docstring''' lowercase__ = MPNetForQuestionAnswering(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase__ = model( __UpperCamelCase, attention_mask=__UpperCamelCase, start_positions=__UpperCamelCase, end_positions=__UpperCamelCase, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def lowercase__ ( self : List[Any], lowerCamelCase : Dict, lowerCamelCase : List[Any], lowerCamelCase : List[Any], lowerCamelCase : str, lowerCamelCase : Tuple, lowerCamelCase : List[Any] ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = MPNetForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase__ = model(__UpperCamelCase, attention_mask=__UpperCamelCase, labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def lowercase__ ( self : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : int ): '''simple docstring''' lowercase__ = self.num_choices lowercase__ = MPNetForMultipleChoice(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowercase__ = model( __UpperCamelCase, attention_mask=__UpperCamelCase, labels=__UpperCamelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def lowercase__ ( self : str, lowerCamelCase : Optional[int], lowerCamelCase : int, lowerCamelCase : Tuple, lowerCamelCase : List[str], lowerCamelCase : Any, lowerCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = MPNetForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase__ = model(__UpperCamelCase, attention_mask=__UpperCamelCase, labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[int] ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( lowercase_ ,lowercase_ ,unittest.TestCase ): """simple docstring""" lowercase__ = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) lowercase__ = ( { """feature-extraction""": MPNetModel, """fill-mask""": MPNetForMaskedLM, """question-answering""": MPNetForQuestionAnswering, """text-classification""": 
MPNetForSequenceClassification, """token-classification""": MPNetForTokenClassification, """zero-shot""": MPNetForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = True def lowercase__ ( self : Tuple ): '''simple docstring''' lowercase__ = MPNetModelTester(self ) lowercase__ = ConfigTester(self, config_class=__UpperCamelCase, hidden_size=37 ) def lowercase__ ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : Tuple ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__UpperCamelCase ) def lowercase__ ( self : List[str] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__UpperCamelCase ) def lowercase__ ( self : int ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__UpperCamelCase ) def lowercase__ ( self : str ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__UpperCamelCase ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowercase__ ( self : List[Any] ): '''simple docstring''' lowercase__ = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) lowercase__ = model(__UpperCamelCase )[0] lowercase__ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape, __UpperCamelCase ) lowercase__ = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3], __UpperCamelCase, atol=1E-4 ) )
code_codestyle: 207
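The MPNet test file above ends with a slow integration check; a standalone version (the input IDs and expected shape are copied from the test):

    import torch
    from transformers import MPNetModel

    model = MPNetModel.from_pretrained("microsoft/mpnet-base")
    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    output = model(input_ids)[0]
    print(output.shape)  # torch.Size([1, 11, 768])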
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging _snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( lowercase_ ): def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ): super().__init__() if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ): if slice_size == "auto": A = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def lowerCamelCase ( self :Tuple ): self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ): A = self.speech_processor.feature_extractor( __UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device ) A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 ) A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[ 0 ] if isinstance(__UpperCamelCase , __UpperCamelCase ): A = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = len(__UpperCamelCase ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(__UpperCamelCase )}." ) # get prompt text embeddings A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) A = text_input_ids[:, : self.tokenizer.model_max_length] A = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A, A, A = text_embeddings.shape A = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
A = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A = 42 if negative_prompt is None: A = [""] * batch_size elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=" f" {type(__UpperCamelCase )}." ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): A = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: A = negative_prompt A = text_input_ids.shape[-1] A = self.tokenizer( __UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , ) A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A = uncond_embeddings.shape[1] A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 ) A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to( self.device ) else: A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A = {} if accepts_eta: A = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: A, A = noise_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = 1 / 0.18_215 * latents A = self.vae.decode(__UpperCamelCase ).sample A = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
style_context_codestyle: 292
label: 0
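The speech-to-image pipeline above applies classifier-free guidance inside its denoising loop; here is the key line, isolated as a pure-tensor illustration:

    import torch

    noise_pred_uncond = torch.zeros(1, 4, 8, 8)
    noise_pred_text = torch.ones(1, 4, 8, 8)
    guidance_scale = 7.5  # the pipeline's default
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    assert noise_pred.max().item() == 7.5  # extrapolates beyond the conditional prediction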
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class __snake_case ( lowercase_ , unittest.TestCase ): __lowerCamelCase : Tuple = PriorTransformer __lowerCamelCase : Dict = """hidden_states""" @property def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : str =4 UpperCAmelCase : Tuple =8 UpperCAmelCase : List[Any] =7 UpperCAmelCase : str =floats_tensor((batch_size, embedding_dim) ).to(__UpperCamelCase ) UpperCAmelCase : Any =floats_tensor((batch_size, embedding_dim) ).to(__UpperCamelCase ) UpperCAmelCase : Optional[Any] =floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__UpperCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def UpperCAmelCase__ ( self , snake_case__=0 ) -> Tuple: '''simple docstring''' torch.manual_seed(__UpperCamelCase ) UpperCAmelCase : int =4 UpperCAmelCase : List[Any] =8 UpperCAmelCase : Optional[int] =7 UpperCAmelCase : str =torch.randn((batch_size, embedding_dim) ).to(__UpperCamelCase ) UpperCAmelCase : Optional[Any] =torch.randn((batch_size, embedding_dim) ).to(__UpperCamelCase ) UpperCAmelCase : List[str] =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__UpperCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' return (4, 8) @property def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' return (4, 8) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Optional[int] ={ '''num_attention_heads''': 2, '''attention_head_dim''': 4, '''num_layers''': 2, '''embedding_dim''': 8, '''num_embeddings''': 7, '''additional_embeddings''': 4, } UpperCAmelCase : Optional[Any] =self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[int] =PriorTransformer.from_pretrained( '''hf-internal-testing/prior-dummy''' , output_loading_info=__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(__UpperCamelCase ) UpperCAmelCase : List[str] =model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any =self.prepare_init_args_and_inputs_for_common() UpperCAmelCase : Union[str, Any] =self.model_class(**__UpperCamelCase ) UpperCAmelCase : Optional[Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : List[str] =[*signature.parameters.keys()] UpperCAmelCase : int =['''hidden_states''', '''timestep'''] self.assertListEqual(arg_names[:2] , __UpperCamelCase ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : str =PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' ) UpperCAmelCase : Tuple =model.to(__UpperCamelCase ) if hasattr(__UpperCamelCase , '''set_default_attn_processor''' ): 
model.set_default_attn_processor() UpperCAmelCase : List[Any] =self.get_dummy_seed_input() with torch.no_grad(): UpperCAmelCase : List[Any] =model(**__UpperCamelCase )[0] UpperCAmelCase : Dict =output[0, :5].flatten().cpu() print(__UpperCamelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. UpperCAmelCase : Dict =torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] ) self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-2 ) ) @slow class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self , snake_case__=1 , snake_case__=768 , snake_case__=77 , snake_case__=0 ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(__UpperCamelCase ) UpperCAmelCase : str =batch_size UpperCAmelCase : str =embedding_dim UpperCAmelCase : List[str] =num_embeddings UpperCAmelCase : Dict =torch.randn((batch_size, embedding_dim) ).to(__UpperCamelCase ) UpperCAmelCase : List[str] =torch.randn((batch_size, embedding_dim) ).to(__UpperCamelCase ) UpperCAmelCase : str =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__UpperCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ] ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> List[str]: '''simple docstring''' UpperCAmelCase : str =PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' ) model.to(__UpperCamelCase ) UpperCAmelCase : Optional[Any] =self.get_dummy_seed_input(seed=__UpperCamelCase ) with torch.no_grad(): UpperCAmelCase : int =model(**__UpperCamelCase )[0] assert list(sample.shape ) == [1, 768] UpperCAmelCase : Optional[Any] =sample[0, :8].flatten().cpu() print(__UpperCamelCase ) UpperCAmelCase : List[str] =torch.tensor(__UpperCamelCase ) assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=1e-3 )
code_codestyle: 348
"""simple docstring""" _snake_case : Optional[int] = [ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
style_context_codestyle: 292
label: 0
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( lowercase_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =LongformerTokenizer UpperCamelCase_ : str =True UpperCamelCase_ : Optional[Any] =LongformerTokenizerFast UpperCamelCase_ : Any =True def _A (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowercase= [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __lowercase= dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) __lowercase= ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __lowercase= {'unk_token': '<unk>'} __lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) def _A (self , **lowerCAmelCase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def _A (self , **lowerCAmelCase ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def _A (self , lowerCAmelCase ): __lowercase= 'lower newer' __lowercase= 'lower newer' return input_text, output_text def _A (self ): __lowercase= self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowercase= 'lower newer' __lowercase= ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] __lowercase= tokenizer.tokenize(__UpperCamelCase ) # , add_prefix_space=True) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) __lowercase= tokens + [tokenizer.unk_token] __lowercase= [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def _A (self ): __lowercase= self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=__UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=__UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , ) @slow def _A (self ): __lowercase= self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) __lowercase= tokenizer.encode('sequence builders' , add_special_tokens=__UpperCamelCase ) __lowercase= tokenizer.encode('multi-sequence build' , add_special_tokens=__UpperCamelCase ) __lowercase= tokenizer.encode( 'sequence builders' , add_special_tokens=__UpperCamelCase , add_prefix_space=__UpperCamelCase ) __lowercase= tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=__UpperCamelCase , add_prefix_space=__UpperCamelCase ) __lowercase= tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ) __lowercase= tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _A (self ): __lowercase= self.get_tokenizer() __lowercase= 'Encode this sequence.' __lowercase= tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments __lowercase= tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase , add_prefix_space=__UpperCamelCase ) __lowercase= tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__UpperCamelCase , __UpperCamelCase ) __lowercase= tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase , add_prefix_space=__UpperCamelCase ) __lowercase= tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) __lowercase= tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) __lowercase= tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__UpperCamelCase , __UpperCamelCase ) # Testing spaces after special tokens __lowercase= '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase )} ) # mask token has a left space __lowercase= tokenizer.convert_tokens_to_ids(__UpperCamelCase ) __lowercase= 'Encode <mask> sequence' __lowercase= 'Encode <mask>sequence' __lowercase= tokenizer.encode(__UpperCamelCase ) __lowercase= encoded.index(__UpperCamelCase ) __lowercase= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) __lowercase= tokenizer.encode(__UpperCamelCase ) __lowercase= encoded.index(__UpperCamelCase ) __lowercase= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__UpperCamelCase , __UpperCamelCase ) def _A (self ): pass def _A (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __lowercase= self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) __lowercase= self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) __lowercase= 'A, <mask> AllenNLP sentence.' 
__lowercase= tokenizer_r.encode_plus(__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_token_type_ids=__UpperCamelCase ) __lowercase= tokenizer_p.encode_plus(__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_token_type_ids=__UpperCamelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) __lowercase= tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) __lowercase= tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( __UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( __UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def _A (self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __lowercase= self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase ) __lowercase= json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __lowercase= json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , __UpperCamelCase ) self.assertEqual(post_processor_state['add_prefix_space'] , __UpperCamelCase ) self.assertEqual(post_processor_state['trim_offsets'] , __UpperCamelCase ) def _A (self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __lowercase= 'hello' # `hello` is a token in the vocabulary of `pretrained_name` __lowercase= f'{text_of_1_token} {text_of_1_token}' __lowercase= self.rust_tokenizer_class.from_pretrained( __UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase ) __lowercase= tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCamelCase ) + 1, len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , ) __lowercase= self.rust_tokenizer_class.from_pretrained( __UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase ) __lowercase= tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCamelCase ) + 1, len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , ) __lowercase= self.rust_tokenizer_class.from_pretrained( __UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , 
trim_offsets=__UpperCamelCase ) __lowercase= tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCamelCase ), len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , ) __lowercase= self.rust_tokenizer_class.from_pretrained( __UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase ) __lowercase= tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCamelCase ), len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , ) __lowercase= f' {text}' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __lowercase= self.rust_tokenizer_class.from_pretrained( __UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase ) __lowercase= tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCamelCase ) + 1, 1 + len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , ) __lowercase= self.rust_tokenizer_class.from_pretrained( __UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase ) __lowercase= tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCamelCase ), 1 + len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , ) __lowercase= self.rust_tokenizer_class.from_pretrained( __UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase ) __lowercase= tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCamelCase ), 1 + len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
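# --- Usage sketch (added; not part of the test file above) -------------------
# A minimal standalone illustration of the byte-level BPE convention the tests
# rely on, assuming `transformers` is installed and `vocab_file`/`merges_file`
# point at the toy files written in `setUp()`:
#
#   from transformers import LongformerTokenizer
#   tok = LongformerTokenizer(vocab_file, merges_file, unk_token="<unk>")
#   tok.tokenize("lower newer")
#   # -> ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
#
# "\u0120" (Ġ) is the byte-level symbol that a space is mapped to before the
# BPE merges run, which is why the space before "newer" surfaces as its own
# token here.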
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def A__ ( UpperCamelCase ): A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(UpperCamelCase , UpperCamelCase ) def A__ ( UpperCamelCase ): A = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: A = s_dict.pop(UpperCamelCase ) elif "subsample" in key: A = s_dict.pop(UpperCamelCase ) def A__ ( UpperCamelCase ): A, A = emb.weight.shape A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) A = emb.weight.data return lin_layer def A__ ( UpperCamelCase , UpperCamelCase ): A = torch.load(UpperCamelCase , map_location="cpu" ) A = mam_aaa["args"] A = mam_aaa["model"] A = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(UpperCamelCase ) rename_keys(UpperCamelCase ) A = state_dict["decoder.embed_tokens.weight"].shape[0] A = args.share_decoder_input_output_embed A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )] A = SpeechaTextConfig( vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , ) A = SpeechaTextForConditionalGeneration(UpperCamelCase ) A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F" but all the following weights are missing {missing}" ) if tie_embeds: A = make_linear_from_emb(model.model.decoder.embed_tokens ) else: A = lm_head_weights model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _snake_case : str = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
"""simple docstring""" import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : int = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'} lowercase__ : Optional[Any] = { 'vocab_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt', }, 'emoji_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json', }, } lowercase__ : int = { 'abeja/gpt-neox-japanese-2.7b': 2_0_4_8, } def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict ) -> str: """simple docstring""" with open(lowerCAmelCase__ , 'r' , encoding='utf-8' ) as f: lowerCAmelCase_ : Tuple = json.loads(f.read() ) lowerCAmelCase_ : List[str] = collections.OrderedDict() lowerCAmelCase_ : int = collections.OrderedDict() lowerCAmelCase_ : Any = collections.OrderedDict() with open(lowerCAmelCase__ , 'r' , encoding='utf-8' ) as f: lowerCAmelCase_ : Dict = f.readlines() lowerCAmelCase_ : str = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token] for idx, b in enumerate(lowerCAmelCase__ ): lowerCAmelCase_ : List[Any] = b lowerCAmelCase_ : Optional[int] = idx for wd in b: lowerCAmelCase_ : int = idx return vocab, raw_vocab, ids_to_tokens, emoji class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""] def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple="<|endoftext|>" , SCREAMING_SNAKE_CASE_ : List[str]="<|endoftext|>" , SCREAMING_SNAKE_CASE_ : Any="<|startoftext|>" , SCREAMING_SNAKE_CASE_ : List[Any]="<|endoftext|>" , SCREAMING_SNAKE_CASE_ : Optional[int]=False , **SCREAMING_SNAKE_CASE_ : Dict , ): super().__init__( unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , do_clean_text=__UpperCamelCase , **__UpperCamelCase , ) if not os.path.isfile(__UpperCamelCase ): raise ValueError( F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) if not os.path.isfile(__UpperCamelCase ): raise ValueError( F"Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google" ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) lowerCAmelCase_ : Optional[int] = do_clean_text lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = load_vocab_and_emoji(__UpperCamelCase , __UpperCamelCase ) lowerCAmelCase_ : str = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): return dict(self.raw_vocab , **self.added_tokens_encoder ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ): return self.subword_tokenizer.tokenize(__UpperCamelCase , clean=self.do_clean_text ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): return self.vocab.get(__UpperCamelCase , self.vocab.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any ): return self.subword_tokenizer.convert_id_to_token(__UpperCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ): lowerCAmelCase_ : Any = ''.join(__UpperCamelCase ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : "Conversation" ): lowerCAmelCase_ : Any = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] ) if len(__UpperCamelCase ) > self.model_max_length: lowerCAmelCase_ : Optional[int] = input_ids[-self.model_max_length :] return input_ids def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ): lowerCAmelCase_ : Dict = 0 if os.path.isdir(__UpperCamelCase ): lowerCAmelCase_ : Union[str, Any] = os.path.join( __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase_ : Any = os.path.join( __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] ) else: lowerCAmelCase_ : List[Any] = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase_ : Optional[int] = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." ' Please check that the vocabulary is not corrupted!' 
) lowerCAmelCase_ : int = token_index writer.write(','.join(__UpperCamelCase ) + '\n' ) index += 1 with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as writer: json.dump(self.emoji , __UpperCamelCase ) return vocab_file, emoji_file class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ): lowerCAmelCase_ : Optional[int] = vocab # same as swe lowerCAmelCase_ : Tuple = ids_to_tokens # same as bpe lowerCAmelCase_ : int = emoji lowerCAmelCase_ : List[str] = np.max([len(__UpperCamelCase ) for w in self.vocab.keys()] ) lowerCAmelCase_ : Dict = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' ) lowerCAmelCase_ : Dict = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' ) lowerCAmelCase_ : Optional[int] = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' ) lowerCAmelCase_ : Optional[Any] = re.compile( r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCAmelCase_ : List[Any] = re.compile( r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCAmelCase_ : Any = re.compile( r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' ) lowerCAmelCase_ : int = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' lowerCAmelCase_ : Any = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' lowerCAmelCase_ : List[Any] = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} ) def __len__( self : Any ): return len(self.ids_to_tokens ) def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Any ): lowerCAmelCase_ : Union[str, Any] = self.content_repattera.sub('<URL>' , __UpperCamelCase ) lowerCAmelCase_ : Dict = self.content_repattera.sub('<EMAIL>' , __UpperCamelCase ) lowerCAmelCase_ : Union[str, Any] = self.content_repattera.sub('<TEL>' , __UpperCamelCase ) lowerCAmelCase_ : Optional[Any] = self.content_repattera.sub('<DATE>' , __UpperCamelCase ) lowerCAmelCase_ : List[Any] = self.content_repattera.sub('<DATE>' , __UpperCamelCase ) lowerCAmelCase_ : Tuple = self.content_repattera.sub('<PRICE>' , __UpperCamelCase ) lowerCAmelCase_ : Dict = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: lowerCAmelCase_ : Optional[int] = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' ) return content def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple=False ): lowerCAmelCase_ : List[str] = text.replace(' ' , '<SP>' ) lowerCAmelCase_ : Optional[Any] = text.replace(' ' , '<SP>' ) lowerCAmelCase_ : Optional[int] = text.replace('\r\n' , '<BR>' ) lowerCAmelCase_ : Tuple = text.replace('\n' , '<BR>' ) lowerCAmelCase_ : Optional[int] = text.replace('\r' , '<BR>' ) lowerCAmelCase_ : Tuple = text.replace('\t' , '<TAB>' ) lowerCAmelCase_ : int = text.replace('—' , 'ー' ) lowerCAmelCase_ : int = text.replace('−' , 'ー' ) for k, v in self.emoji["emoji"].items(): if k in text: lowerCAmelCase_ : Union[str, Any] = text.replace(__UpperCamelCase , __UpperCamelCase ) if clean: 
lowerCAmelCase_ : int = self.clean_text(__UpperCamelCase ) def check_simbol(SCREAMING_SNAKE_CASE_ : List[str] ): lowerCAmelCase_ : Union[str, Any] = x.encode() if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 2: lowerCAmelCase_ : str = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0XC2_A1 and c <= 0XC2_BF) or (c >= 0XC7_80 and c <= 0XC7_83) or (c >= 0XCA_B9 and c <= 0XCB_BF) or (c >= 0XCC_80 and c <= 0XCD_A2) ): return True return False def checkuae(SCREAMING_SNAKE_CASE_ : Any ): lowerCAmelCase_ : Dict = x.encode() if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 3: lowerCAmelCase_ : List[str] = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0XE2_80_80 and c <= 0XE2_B0_7F: return True return False lowerCAmelCase_ : List[str] = 0 lowerCAmelCase_ : Union[str, Any] = [] while pos < len(__UpperCamelCase ): lowerCAmelCase_ : Optional[int] = min(len(__UpperCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3 lowerCAmelCase_ : Dict = [] # (token_id, token, pos) for e in range(__UpperCamelCase , __UpperCamelCase , -1 ): lowerCAmelCase_ : Optional[Any] = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__UpperCamelCase ) > 2: lowerCAmelCase_ : Any = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(__UpperCamelCase ) > 0: # the smallest token_id is adopted lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Any = sorted(__UpperCamelCase , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )[0] result.append(__UpperCamelCase ) lowerCAmelCase_ : Union[str, Any] = e else: lowerCAmelCase_ : Optional[int] = pos + 1 lowerCAmelCase_ : List[Any] = text[pos:end] if check_simbol(__UpperCamelCase ): result.append('<KIGOU>' ) elif checkuae(__UpperCamelCase ): result.append('<U2000U2BFF>' ) else: for i in wd.encode('utf-8' ): result.append('<|byte%d|>' % i ) lowerCAmelCase_ : List[str] = end return result def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str]="\n" ): lowerCAmelCase_ : str = [] lowerCAmelCase_ : Optional[Any] = [] lowerCAmelCase_ : List[str] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(__UpperCamelCase ) > 0: words.append(bytearray(__UpperCamelCase ).decode('utf-8' , errors='replace' ) ) lowerCAmelCase_ : Optional[Any] = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['emoji_inv'][word] ) elif word == "<SP>": words.append(' ' ) elif word == "<BR>": words.append(__UpperCamelCase ) elif word == "<TAB>": words.append('\t' ) elif word == "<BLOCK>": words.append('▀' ) elif word == "<KIGOU>": words.append('ǀ' ) elif word == "<U2000U2BFF>": words.append('‖' ) else: words.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: words.append(bytearray(__UpperCamelCase ).decode('utf-8' , errors='replace' ) ) lowerCAmelCase_ : int = ''.join(__UpperCamelCase ) return text
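# --- Usage sketch (added; not part of the tokenizer module above) ------------
# Assuming this module is the GPTNeoXJapaneseTokenizer shipped with
# `transformers` and the public "abeja/gpt-neox-japanese-2.7b" files:
#
#   from transformers import GPTNeoXJapaneseTokenizer
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界。")["input_ids"]
#   tokenizer.decode(ids)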
"""simple docstring""" from math import isqrt, loga def A__ ( UpperCamelCase ): A = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCamelCase , UpperCamelCase ): A = False return [i for i in range(2 , UpperCamelCase ) if is_prime[i]] def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ): A = degree * loga(UpperCamelCase ) A = int(UpperCamelCase ) A = calculate_prime_numbers(UpperCamelCase ) A = 0 A = 0 A = len(UpperCamelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"""{solution() = }""")
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
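# --- Usage sketch (added) -----------------------------------------------------
# binary_and ANDs the binary digits position by position after zero-padding
# both operands to a common width, e.g. 25 = 0b11001 and 32 = 0b100000 share
# no set bits:
if __name__ == "__main__":
    assert binary_and(25, 32) == "0b000000"
    assert binary_and(5, 3) == "0b001"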
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _snake_case : Union[str, Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : int = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _snake_case ( _snake_case : str ) -> Dict: # picklable for multiprocessing '''simple docstring''' return x.sum() def _snake_case ( _snake_case : int ) -> Any: # picklable for multiprocessing '''simple docstring''' return i + 1 @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int = 42 UpperCAmelCase : Tuple = 42 class lowercase_ ( lowercase_ ): '''simple docstring''' def lowerCAmelCase_ ( self : Any ): _A = {} _A = [] _A = 1 _A = [1, 2] _A = {'a': 1, 'b': 2} _A = {'a': [1, 2], 'b': [3, 4]} _A = {'a': {'1': 1}, 'b': 2} _A = {'a': 1, 'b': 2, 'c': 3, 'd': 4} _A = {} _A = [] _A = 2 _A = [2, 3] _A = {'a': 2, 'b': 3} _A = {'a': [2, 3], 'b': [4, 5]} _A = {'a': {'1': 2}, 'b': 3} _A = {'a': 2, 'b': 3, 'c': 4, 'd': 5} self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) _A = 2 self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) _A = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )} _A = {'a': 2, 'b': 0, 'c': 2} _A = { 'a': np.eye(2 ).astype(__UpperCamelCase ), 'b': np.zeros(3 ).astype(__UpperCamelCase ), 'c': np.ones(2 ).astype(__UpperCamelCase ), } self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(__UpperCamelCase , 
__UpperCamelCase , map_numpy=__UpperCamelCase , num_proc=__UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(__UpperCamelCase ): # can't pickle a local lambda map_nested(lambda _UpperCAmelCase : x + 1 , __UpperCamelCase , num_proc=__UpperCamelCase ) def lowerCAmelCase_ ( self : Dict ): _A = {'a': 1, 'b': 2} _A = {'a': 3, 'b': 4} _A = {'a': 5, 'b': 6} _A = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) , __UpperCamelCase ) def lowerCAmelCase_ ( self : List[Any] ): class lowercase_ : '''simple docstring''' UpperCAmelCase : List[Any] = '''bar''' _A = Foo() self.assertEqual(foo.my_attr , 'bar' ) with temporary_assignment(__UpperCamelCase , 'my_attr' , 'BAR' ): self.assertEqual(foo.my_attr , 'BAR' ) self.assertEqual(foo.my_attr , 'bar' ) @pytest.mark.parametrize( 'iterable_length, num_proc, expected_num_proc' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def _snake_case ( _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Any ) -> List[str]: '''simple docstring''' with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch( 'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool: _A = {F'''{i}''': i for i in range(_snake_case )} _A = map_nested(lambda _snake_case : x + 10 , _snake_case , num_proc=_snake_case , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class lowercase_ ( lowercase_ ): '''simple docstring''' @require_tf def lowerCAmelCase_ ( self : str ): import tensorflow as tf from tensorflow.keras import layers _A = layers.Dense(2 ) def gen_random_output(): _A = tf.random.uniform((1, 3) ) return model(__UpperCamelCase ).numpy() with temp_seed(42 , set_tensorflow=__UpperCamelCase ): _A = gen_random_output() with temp_seed(42 , set_tensorflow=__UpperCamelCase ): _A = gen_random_output() _A = gen_random_output() np.testing.assert_equal(__UpperCamelCase , __UpperCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def lowerCAmelCase_ ( self : Optional[int] ): import torch def gen_random_output(): _A = torch.nn.Linear(3 , 2 ) _A = torch.rand(1 , 3 ) return model(__UpperCamelCase ).detach().numpy() with temp_seed(42 , set_pytorch=__UpperCamelCase ): _A = gen_random_output() with temp_seed(42 , set_pytorch=__UpperCamelCase ): _A = gen_random_output() _A = gen_random_output() np.testing.assert_equal(__UpperCamelCase , __UpperCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def lowerCAmelCase_ ( self : Tuple ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): _A = gen_random_output() with temp_seed(42 ): _A = gen_random_output() _A = gen_random_output() np.testing.assert_equal(__UpperCamelCase , __UpperCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('input_data' , [{}] ) def _snake_case ( _snake_case : Optional[int] ) -> Any: '''simple docstring''' _A = NestedDataStructure(_snake_case ).data assert output_data == input_data @pytest.mark.parametrize( 'data, expected_output' , [ ({}, []), ([], []), ('foo', ['foo']), (['foo', 'bar'], ['foo', 'bar']), 
([['foo', 'bar']], ['foo', 'bar']), ([[['foo'], ['bar']]], ['foo', 'bar']), ([[['foo'], 'bar']], ['foo', 'bar']), ({'a': 1, 'b': 2}, [1, 2]), ({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]), ({'a': {'1': 1}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': [2]}, [1, 2]), ] , ) def _snake_case ( _snake_case : int , _snake_case : int ) -> str: '''simple docstring''' _A = NestedDataStructure(_snake_case ).flatten() assert output == expected_output def _snake_case ( ) -> Optional[Any]: '''simple docstring''' _A = A(x=1 , y='foobar' ) _A = {'x': 1, 'y': 'foobar'} assert asdict(_snake_case ) == expected_output _A = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]} _A = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]} assert asdict(_snake_case ) == expected_output with pytest.raises(_snake_case ): asdict([1, A(x=10 , y='foo' )] ) def _snake_case ( _snake_case : Optional[int] ) -> Optional[int]: '''simple docstring''' return text.split() def _snake_case ( _snake_case : List[Any] ) -> Optional[Any]: '''simple docstring''' yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _snake_case ( ) -> Optional[Any]: '''simple docstring''' with Pool(2 ) as pool: _A = list(iflatmap_unordered(_snake_case , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) ) assert out.count('hello' ) == 10 assert out.count('there' ) == 10 assert len(_snake_case ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _A = list(iflatmap_unordered(_snake_case , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) ) assert out.count('hello' ) == 10 assert out.count('there' ) == 10 assert len(_snake_case ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _A = [] for yield_time, content in iflatmap_unordered( _snake_case , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(_snake_case ) assert out.count('a' ) == 2 assert out.count('b' ) == 2 assert len(_snake_case ) == 4
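# --- Usage sketch (added; not part of the test module above) ------------------
# `map_nested` applies a function to every leaf of an arbitrarily nested
# structure, optionally in parallel; a minimal call, assuming `datasets` is
# installed:
#
#   from datasets.utils.py_utils import map_nested
#   map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}})
#   # -> {'a': [2, 3], 'b': {'c': 4}}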
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : int = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''marian''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = vocab_size A = decoder_vocab_size or vocab_size A = max_position_embeddings A = d_model A = encoder_ffn_dim A = encoder_layers A = encoder_attention_heads A = decoder_ffn_dim A = decoder_layers A = decoder_attention_heads A = dropout A = attention_dropout A = activation_dropout A = activation_function A = init_std A = encoder_layerdrop A = decoder_layerdrop A = use_cache A = encoder_layers A = scale_embedding # scale factor will be sqrt(d_model) if True A = share_encoder_decoder_embeddings super().__init__( pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , ) class _UpperCAmelCase ( lowercase_ ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A = {0: "batch"} A = {0: "batch", 1: "past_decoder_sequence + sequence"} else: A = {0: "batch", 1: "decoder_sequence"} A = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} else: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = super().outputs else: A = super(__UpperCamelCase , self ).outputs if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Generate decoder inputs A = seq_length if not self.use_past else 1 A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} A = dict(**__UpperCamelCase , **__UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch A, A = common_inputs["input_ids"].shape A = common_inputs["decoder_input_ids"].shape[1] A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A = decoder_seq_length + 3 A = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 ) A = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A, A = self.num_layers A = min(__UpperCamelCase , __UpperCamelCase ) A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__UpperCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), ) ) # TODO: test this. 
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__UpperCamelCase , __UpperCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) ) return common_inputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch A, A = common_inputs["input_ids"].shape # Not using the same length for past_key_values A = seqlen + 2 A, A = self.num_layers A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A = common_inputs["attention_mask"].dtype A = torch.cat( [common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 ) A = [ (torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase ) ] return common_inputs def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A = tokenizer.num_special_tokens_to_add(__UpperCamelCase ) A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: A = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) else: A = self._generate_dummy_inputs_for_causal_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ): if self.task in ["default", "seq2seq-lm"]: A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: A = super(__UpperCamelCase , self )._flatten_past_key_values_( __UpperCamelCase , 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @property def lowerCamelCase ( self :List[str] ): return 1e-4
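# --- Usage sketch (added; illustrative only) ----------------------------------
# The ONNX config above is what the (legacy) `transformers.onnx` exporter
# picks up for Marian checkpoints, e.g. with the public
# "Helsinki-NLP/opus-mt-en-de" model:
#
#   python -m transformers.onnx --model=Helsinki-NLP/opus-mt-en-de --feature=seq2seq-lm onnx/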
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'], 'convert_funnel_original_tf_checkpoint_to_pytorch': [], 'tokenization_funnel': ['FunnelTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['FunnelTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST', 'FunnelBaseModel', 'FunnelForMaskedLM', 'FunnelForMultipleChoice', 'FunnelForPreTraining', 'FunnelForQuestionAnswering', 'FunnelForSequenceClassification', 'FunnelForTokenClassification', 'FunnelModel', 'FunnelPreTrainedModel', 'load_tf_weights_in_funnel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFFunnelBaseModel', 'TFFunnelForMaskedLM', 'TFFunnelForMultipleChoice', 'TFFunnelForPreTraining', 'TFFunnelForQuestionAnswering', 'TFFunnelForSequenceClassification', 'TFFunnelForTokenClassification', 'TFFunnelModel', 'TFFunnelPreTrainedModel', ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def A__ ( UpperCamelCase ): A = [False] * len(UpperCamelCase ) A = [-1] * len(UpperCamelCase ) def dfs(UpperCamelCase , UpperCamelCase ): A = True A = c for u in graph[v]: if not visited[u]: dfs(UpperCamelCase , 1 - c ) for i in range(len(UpperCamelCase ) ): if not visited[i]: dfs(UpperCamelCase , 0 ) for i in range(len(UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _snake_case : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __snake_case :str = logging.get_logger(__name__) class _A ( lowercase_ ): UpperCamelCase__ : Optional[Any] = ['''pixel_values'''] def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ): '''simple docstring''' super().__init__(**__UpperCamelCase) __a = size if size is not None else {'''height''': 224, '''width''': 224} __a = get_size_dict(__UpperCamelCase) __a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __a = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name='''crop_size''') __a = do_resize __a = do_rescale __a = do_normalize __a = do_center_crop __a = crop_size __a = size __a = resample __a = rescale_factor __a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : int , ): '''simple docstring''' __a = get_size_dict(__UpperCamelCase) if "shortest_edge" in size: __a = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __a = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}') return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase) def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ): '''simple docstring''' __a = get_size_dict(__UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}') return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase) def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Dict , ): '''simple docstring''' return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase) def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : int = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[float] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : List[Any] , ): '''simple docstring''' __a = do_resize if do_resize is not None else self.do_resize __a = do_rescale if do_rescale is not None else self.do_rescale __a = do_normalize if do_normalize is not None else self.do_normalize __a = do_center_crop if do_center_crop is not None else self.do_center_crop __a = crop_size if crop_size is not None else self.crop_size __a = get_size_dict(__UpperCamelCase , param_name='''crop_size''' , default_to_square=__UpperCamelCase) __a = resample if resample is not None else self.resample __a = rescale_factor if rescale_factor is not None else self.rescale_factor __a = image_mean if image_mean is not None else self.image_mean __a = image_std if image_std is not None else self.image_std __a = size if size is not None else self.size __a = get_size_dict(__UpperCamelCase) if not is_batched(__UpperCamelCase): __a = [images] if not valid_images(__UpperCamelCase): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') # All transformations expect numpy arrays. 
__a = [to_numpy_array(__UpperCamelCase) for image in images] if do_resize: __a = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase) for image in images] if do_center_crop: __a = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase) for image in images] if do_rescale: __a = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase) for image in images] if do_normalize: __a = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase) for image in images] __a = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase) for image in images] __a = {'''pixel_values''': images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase)
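# --- Usage sketch (added; illustrative only) ----------------------------------
# The processor above follows the standard `BaseImageProcessor` call protocol;
# assuming it is registered in transformers (e.g. as `LevitImageProcessor`):
#
#   processor = LevitImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape   # torch.Size([1, 3, 224, 224]) with the defaults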
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _UpperCAmelCase ( lowercase_ ): def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ): A = 1.0 if scale is None else scale A = 0.0 if loc is None else loc super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] ) @property def lowerCamelCase ( self :Any ): return self.base_dist.mean * self.scale + self.loc @property def lowerCamelCase ( self :Optional[int] ): return self.base_dist.variance * self.scale**2 @property def lowerCamelCase ( self :Dict ): return self.variance.sqrt() class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ): super().__init__(**__UpperCamelCase ) A = args_dim A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] ) A = domain_map def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ): A = [proj(__UpperCamelCase ) for proj in self.proj] return self.domain_map(*__UpperCamelCase ) class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int ): super().__init__() A = function def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ): return self.function(__UpperCamelCase , *__UpperCamelCase ) class _UpperCAmelCase : UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 def __init__( self :Any , __UpperCamelCase :int = 1 ): A = dim A = {k: dim * self.args_dim[k] for k in self.args_dim} def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ): if self.dim == 1: return self.distribution_class(*__UpperCamelCase ) else: return Independent(self.distribution_class(*__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ): A = self._base_distribution(__UpperCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim ) @property def lowerCamelCase ( self :List[Any] ): return () if self.dim == 1 else (self.dim,) @property def lowerCamelCase ( self :Tuple ): return len(self.event_shape ) @property def lowerCamelCase ( self :int ): return 0.0 def lowerCamelCase ( self :str , __UpperCamelCase :int ): return ParameterProjection( in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ): raise NotImplementedError() @staticmethod def lowerCamelCase ( __UpperCamelCase :torch.Tensor ): return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0 class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"df": 1, "loc": 1, "scale": 1} UpperCamelCase = StudentT @classmethod def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) A = 2.0 + cls.squareplus(__UpperCamelCase ) return 
df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"loc": 1, "scale": 1} UpperCamelCase = Normal @classmethod def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"total_count": 1, "logits": 1} UpperCamelCase = NegativeBinomial @classmethod def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ): A, A = distr_args if self.dim == 1: return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) else: return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ): A, A = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
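The squareplus helper above is what keeps parameters such as scale strictly positive. A standalone restatement of the same formula for intuition (not the module itself):

import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    # Smooth, always-positive map: (x + sqrt(x^2 + 4)) / 2; behaves like softplus.
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

x = torch.tensor([-5.0, 0.0, 5.0])
print(squareplus(x))  # strictly positive, approximately [0.193, 1.0, 5.193]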
from __future__ import annotations

import requests

valid_terms = set(
    "approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via the public JSON endpoint."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
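A usage sketch beyond the __main__ demo (subject to network access and Reddit's rate limiting):

posts = get_subreddit_data("learnpython", limit=3, age="top", wanted_data=["title", "score"])
for idx, post in posts.items():
    print(idx, post["title"], post["score"])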
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class _UpperCAmelCase : UpperCamelCase = None def lowerCamelCase ( self :List[Any] ): A = self.feature_extraction_class(**self.feat_extract_dict ) A = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) A = self.feature_extraction_class.from_json_file(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Dict ): A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) A = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowerCamelCase ( self :Tuple ): A = self.feature_extraction_class() self.assertIsNotNone(__UpperCamelCase )
"""simple docstring""" import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel UpperCAmelCase__ = False UpperCAmelCase__ = True UpperCAmelCase__ = False if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--repo_path', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = { 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } UpperCAmelCase__ = { 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } UpperCAmelCase__ = '' if has_file(args.repo_path, 'config.json') else 'unet' with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader: UpperCAmelCase__ = reader.read() UpperCAmelCase__ = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, 'config.json'): UpperCAmelCase__ = UNetaDModel(**config) else: UpperCAmelCase__ = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel UpperCAmelCase__ = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) UpperCAmelCase__ = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: UpperCAmelCase__ = config[key] del config[key] UpperCAmelCase__ = [k.replace('UNetRes', '') for k in config['down_block_types']] UpperCAmelCase__ = [k.replace('UNetRes', '') for k in config['up_block_types']] if do_only_weights: UpperCAmelCase__ = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin')) UpperCAmelCase__ = {} for param_key, param_value in state_dict.items(): if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'): continue UpperCAmelCase__ = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('.')[0] == key: UpperCAmelCase__ = param_value UpperCAmelCase__ = True if not has_changed: UpperCAmelCase__ = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = RoFormerTokenizer UpperCamelCase = RoFormerTokenizerFast UpperCamelCase = True UpperCamelCase = True def lowerCamelCase ( self :List[str] ): super().setUp() def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ): return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ): return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase ) def lowerCamelCase ( self :Any ): A = "永和服装饰品有限公司,今天天气非常好" A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好" return input_text, output_text def lowerCamelCase ( self :int ): A = self.get_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :str ): A = self.get_rust_tokenizer() A, A = self.get_chinese_input_output_texts() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , output_text.split() ) A = tokens + [tokenizer.unk_token] A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def lowerCamelCase ( self :Any ): pass def lowerCamelCase ( self :Tuple ): pass def lowerCamelCase ( self :List[str] ): pass
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
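Because of lru_cache, repeated calls reuse previously computed results:

print(factorial(5))            # 120
print(factorial(6))            # 720, computed as 6 * cached factorial(5)
print(factorial.cache_info())  # hit/miss statistics exposed by functools.lru_cache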
"""simple docstring""" def A__ ( UpperCamelCase , UpperCamelCase = False ): if not isinstance(UpperCamelCase , UpperCamelCase ): A = F"Expected string as input, found {type(UpperCamelCase )}" raise ValueError(UpperCamelCase ) if not isinstance(UpperCamelCase , UpperCamelCase ): A = F"Expected boolean as use_pascal parameter, found {type(UpperCamelCase )}" raise ValueError(UpperCamelCase ) A = input_str.split("_" ) A = 0 if use_pascal else 1 A = words[start_index:] A = [word[0].upper() + word[1:] for word in words_to_capitalize] A = "" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
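The property under test, restated standalone for clarity: a dataset counts as "small" only when both its size and the in-memory cap are known and the size is strictly below the cap (an illustrative re-implementation, not the library function):

def is_small(dataset_size, in_memory_max_size):
    return bool(dataset_size and in_memory_max_size) and dataset_size < in_memory_max_size

print(is_small(400 * 2**20, 500 * 2**20))  # True: 400 MiB under a 500 MiB cap
print(is_small(600 * 2**20, 500 * 2**20))  # False: over the cap
print(is_small(None, 500 * 2**20))         # False: size unknown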
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name _snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ): A = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 A = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _UpperCAmelCase ( lowercase_ ): def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ): super().__init__() self.register_modules( unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , ) A = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ): if latents is None: A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) A = latents.to(__UpperCamelCase ) A = latents * scheduler.init_noise_sigma return latents def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) A = torch.device(f"cuda:{gpu_id}" ) A = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCamelCase , __UpperCamelCase ) def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) A = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) A = None for cpu_offloaded_model in [self.unet, self.movq]: A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase ) # We'll offload the last model manually. 
A = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase ( self :str ): if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCamelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__UpperCamelCase ) def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ): A = self._execution_device A = guidance_scale > 1.0 if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) if isinstance(__UpperCamelCase , __UpperCamelCase ): A = torch.cat(__UpperCamelCase , dim=0 ) A = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) A = hint.repeat_interleave(__UpperCamelCase , dim=0 ) A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase ) A = self.scheduler.timesteps A = self.movq.config.latent_channels A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor ) # create initial latent A = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = {"image_embeds": image_embeds, "hint": hint} A = self.unet( sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0] if do_classifier_free_guidance: A, A = noise_pred.split(latents.shape[1] , dim=1 ) A, A = noise_pred.chunk(2 ) A, A = variance_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) A = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): A, A = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0] # post-processing A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"] if output_type 
not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: A = image * 0.5 + 0.5 A = image.clamp(0 , 1 ) A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
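What the lazy-module pattern buys: importing the package stays cheap because submodules load only on first attribute access. A hypothetical standalone analogue (not transformers' _LazyModule itself):

import importlib

class LazyModule:
    def __init__(self, name_map):
        self._name_map = name_map  # attribute name -> submodule path

    def __getattr__(self, attr):
        module = importlib.import_module(self._name_map[attr])  # real import happens here
        return getattr(module, attr)

lazy = LazyModule({"dataclass": "dataclasses"})
print(lazy.dataclass)  # the real dataclasses.dataclass, imported only now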
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase : def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A = (image_size // patch_size) ** 2 A = num_patches + 1 def lowerCamelCase ( self :Any ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self :Union[str, Any] ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ): A = ViTMSNModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ): A = self.type_sequence_label_size A = ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , labels=__UpperCamelCase ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = 
ViTMSNForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self :Optional[Any] ): A = self.prepare_config_and_inputs() A, A, A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Optional[int] ): A = ViTMSNModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def lowerCamelCase ( self :Union[str, Any] ): pass def lowerCamelCase ( self :int ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def lowerCamelCase ( self :Tuple ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def lowerCamelCase ( self :List[Any] ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = ViTMSNModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def A__ ( ): A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Union[str, Any] ): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def lowerCamelCase ( self :Any ): torch.manual_seed(2 ) A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase ) A = self.default_image_processor A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): A = model(**__UpperCamelCase ) # verify the logits A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
292
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]: '''simple docstring''' __lowercase= F'{sampling_rate}' __lowercase= '1' __lowercase= 'f32le' __lowercase= [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: __lowercase= ffmpeg_process.communicate(lowercase__ ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error __lowercase= output_stream[0] __lowercase= np.frombuffer(lowercase__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ = "f32le" , ) -> List[Any]: '''simple docstring''' __lowercase= F'{sampling_rate}' __lowercase= '1' if format_for_conversion == "s16le": __lowercase= 2 elif format_for_conversion == "f32le": __lowercase= 4 else: raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' ) __lowercase= platform.system() if system == "Linux": __lowercase= 'alsa' __lowercase= 'default' elif system == "Darwin": __lowercase= 'avfoundation' __lowercase= ':0' elif system == "Windows": __lowercase= 'dshow' __lowercase= 'default' __lowercase= [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] __lowercase= int(round(sampling_rate * chunk_length_s ) ) * size_of_sample __lowercase= _ffmpeg_stream(lowercase__ , lowercase__ ) for item in iterator: yield item def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ) -> int: '''simple docstring''' if stream_chunk_s is not None: __lowercase= stream_chunk_s else: __lowercase= chunk_length_s __lowercase= ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ ) if format_for_conversion == "s16le": __lowercase= np.intaa __lowercase= 2 elif format_for_conversion == "f32le": __lowercase= np.floataa __lowercase= 4 else: raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' ) if stride_length_s is None: __lowercase= chunk_length_s / 6 __lowercase= int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowercase__ , (int, float) ): __lowercase= [stride_length_s, stride_length_s] __lowercase= int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample __lowercase= int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample __lowercase= datetime.datetime.now() __lowercase= datetime.timedelta(seconds=lowercase__ ) for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ): # Put everything back in numpy scale __lowercase= np.frombuffer(item['raw'] , dtype=lowercase__ ) __lowercase= ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) __lowercase= sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 1_0 * delta: # We're late !! 
SKIP continue yield item def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ) -> Any: '''simple docstring''' __lowercase= B'' __lowercase, __lowercase= stride if stride_left + stride_right >= chunk_len: raise ValueError( F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' ) __lowercase= 0 for raw in iterator: acc += raw if stream and len(lowercase__ ) < chunk_len: __lowercase= (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowercase__ ) >= chunk_len: # We are flushing the accumulator __lowercase= (_stride_left, stride_right) __lowercase= {'raw': acc[:chunk_len], 'stride': stride} if stream: __lowercase= False yield item __lowercase= stride_left __lowercase= acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowercase__ ) > stride_left: __lowercase= {'raw': acc, 'stride': (_stride_left, 0)} if stream: __lowercase= False yield item def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[Any]: '''simple docstring''' __lowercase= 2**2_4 # 16Mo try: with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process: while True: __lowercase= ffmpeg_process.stdout.read(lowercase__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
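A hedged usage sketch for the first helper above (its obfuscated name hides the original; in transformers the equivalent is ffmpeg_read). Requires ffmpeg on PATH and a local audio file at a hypothetical path:

with open("sample.wav", "rb") as f:  # hypothetical input file
    payload = f.read()
audio = ffmpeg_read(payload, 16_000)  # assumed name; decodes to float32 mono samples at 16 kHz
print(audio.dtype, audio.shape)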
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Optional[int] = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''vivit''' def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**__UpperCamelCase )
"""simple docstring""" from timeit import timeit def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" if number < 0: raise ValueError('the value of input must not be negative' ) lowerCAmelCase__ :List[str] = 0 while number: number &= number - 1 result += 1 return result def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" if number < 0: raise ValueError('the value of input must not be negative' ) lowerCAmelCase__ :Optional[Any] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def __A () ->None: """simple docstring""" def do_benchmark(_SCREAMING_SNAKE_CASE ) -> None: lowerCAmelCase__ :int = 'import __main__ as z' print(F"Benchmark when {number = }:" ) print(F"{get_set_bits_count_using_modulo_operator(_SCREAMING_SNAKE_CASE ) = }" ) lowerCAmelCase__ :Union[str, Any] = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=_SCREAMING_SNAKE_CASE ) print(F"timeit() runs in {timing} seconds" ) print(F"{get_set_bits_count_using_brian_kernighans_algorithm(_SCREAMING_SNAKE_CASE ) = }" ) lowerCAmelCase__ :Optional[Any] = timeit( 'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=_SCREAMING_SNAKE_CASE , ) print(F"timeit() runs in {timing} seconds" ) for number in (25, 37, 58, 0): do_benchmark(_SCREAMING_SNAKE_CASE ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" import math def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be an integer" raise TypeError(_SCREAMING_SNAKE_CASE ) if number < 1: lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be > 0" raise ValueError(_SCREAMING_SNAKE_CASE ) elif number == 1: return 3 elif number == 2: return 5 else: lowerCAmelCase__ :Union[str, Any] = int(math.log(number // 3 , 2 ) ) + 2 lowerCAmelCase__ :Optional[Any] = [3, 5] lowerCAmelCase__ :Optional[Any] = 2 lowerCAmelCase__ :List[str] = 3 for block in range(1 , _SCREAMING_SNAKE_CASE ): for _ in range(_SCREAMING_SNAKE_CASE ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): __A = 0 try: __A = proth(number) except ValueError: print(F'''ValueError: there is no {number}th Proth number''') continue print(F'''The {number}th Proth number: {value}''')
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __A = TypeVar("""KEY""") __A = TypeVar("""VAL""") @dataclass(frozen=a , slots=a ) class _lowerCAmelCase ( Generic[KEY, VAL] ): """simple docstring""" __magic_name__ :KEY __magic_name__ :VAL class _lowerCAmelCase ( _Item ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __bool__( self ): '''simple docstring''' return False __A = _DeletedItem() class _lowerCAmelCase ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.75 ): '''simple docstring''' lowerCAmelCase__ :List[str] = initial_block_size lowerCAmelCase__ :list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ :Tuple = capacity_factor lowerCAmelCase__ :str = 0 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return hash(__UpperCAmelCase ) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = self._buckets[ind] if not stored: lowerCAmelCase__ :Dict = _Item(__UpperCAmelCase , __UpperCAmelCase ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ :Optional[Any] = _Item(__UpperCAmelCase , __UpperCAmelCase ) return True else: return False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ :Optional[Any] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self._buckets lowerCAmelCase__ :Tuple = [None] * new_size lowerCAmelCase__ :List[Any] = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self._get_bucket_index(__UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ :Tuple = self._get_next_ind(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__UpperCAmelCase , __UpperCAmelCase ) def __delitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :int = self._buckets[ind] if item is None: raise KeyError(__UpperCAmelCase ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ :List[str] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :str = 
self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCAmelCase ) def __len__( self ): '''simple docstring''' return self._len def __iter__( self ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ' ,'.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return F"HashMap({val_string})"
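A hedged usage sketch for the open-addressing map above (assuming the outer class is the HashMap named in its __repr__):

hm = HashMap()
hm["apple"] = 1
hm["banana"] = 2
print(hm["apple"], len(hm))     # 1 2
del hm["apple"]                 # slot is tombstoned, not emptied
print("apple" in hm, list(hm))  # False ['banana']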
"""simple docstring""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ :Dict = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append( (F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append( (F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append( (F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append( (F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('encoder.deit.cls_token', 'encoder.embeddings.cls_token'), ('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'), ('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'), ('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'), ('encoder.deit.norm.weight', 'encoder.layernorm.weight'), ('encoder.deit.norm.bias', 'encoder.layernorm.bias'), ] ) return rename_keys def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) lowerCAmelCase__ :str = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" ) lowerCAmelCase__ :Dict = in_proj_weight[ : encoder_config.hidden_size, : ] lowerCAmelCase__ :Dict = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] lowerCAmelCase__ :Tuple = in_proj_weight[ -encoder_config.hidden_size :, : ] def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" lowerCAmelCase__ :List[Any] = dct.pop(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = val def __A (_SCREAMING_SNAKE_CASE ) ->Any: """simple docstring""" if "handwritten" in checkpoint_url: lowerCAmelCase__ :Tuple = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = 
"https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: lowerCAmelCase__ :Any = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg' lowerCAmelCase__ :str = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' ) return im @torch.no_grad() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]: """simple docstring""" lowerCAmelCase__ :List[Any] = ViTConfig(image_size=384 , qkv_bias=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Tuple = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: lowerCAmelCase__ :int = 768 elif "large" in checkpoint_url: # use ViT-large encoder lowerCAmelCase__ :str = 1024 lowerCAmelCase__ :Optional[int] = 4096 lowerCAmelCase__ :Tuple = 24 lowerCAmelCase__ :List[Any] = 16 lowerCAmelCase__ :Optional[Any] = 1024 else: raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: lowerCAmelCase__ :Optional[Any] = False lowerCAmelCase__ :Optional[Any] = 'relu' lowerCAmelCase__ :Any = 1024 lowerCAmelCase__ :List[Any] = True lowerCAmelCase__ :Dict = False lowerCAmelCase__ :Optional[Any] = False # load HuggingFace model lowerCAmelCase__ :str = ViTModel(_SCREAMING_SNAKE_CASE , add_pooling_layer=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :int = TrOCRForCausalLM(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = VisionEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE ) model.eval() # load state_dict of original model, rename some keys lowerCAmelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE )['model'] lowerCAmelCase__ :Union[str, Any] = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): lowerCAmelCase__ :List[Any] = state_dict.pop(_SCREAMING_SNAKE_CASE ) if key.startswith('decoder' ) and "output_projection" not in key: lowerCAmelCase__ :List[Any] = val else: lowerCAmelCase__ :Dict = val # load state dict model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image lowerCAmelCase__ :Optional[Any] = ViTImageProcessor(size=encoder_config.image_size ) lowerCAmelCase__ :Dict = RobertaTokenizer.from_pretrained('roberta-large' ) lowerCAmelCase__ :List[str] = TrOCRProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[Any] = processor(images=prepare_img(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values # verify logits lowerCAmelCase__ :List[str] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) lowerCAmelCase__ :List[Any] = model(pixel_values=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = outputs.logits lowerCAmelCase__ :Dict = torch.Size([1, 1, 5_0265] ) if "trocr-base-handwritten" in checkpoint_url: 
lowerCAmelCase__ :Optional[Any] = torch.tensor( [-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] ) elif "trocr-large-handwritten" in checkpoint_url: lowerCAmelCase__ :Tuple = torch.tensor( [-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] ) elif "trocr-base-printed" in checkpoint_url: lowerCAmelCase__ :Dict = torch.tensor( [-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] ) elif "trocr-large-printed" in checkpoint_url: lowerCAmelCase__ :Optional[Any] = torch.tensor( [-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , _SCREAMING_SNAKE_CASE , atol=1e-3 ), "First elements of logits not as expected" Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) __A = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __A = logging.getLogger(__name__) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" if os.path.exists(_SCREAMING_SNAKE_CASE ): if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) else: os.makedirs(_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :Dict = 2 if unlogit: lowerCAmelCase__ :List[str] = torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :str = p * torch.log(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = 0 return -plogp.sum(dim=-1 ) def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" logger.info('lv, h >\t' + '\t'.join(F"{x + 1}" for x in range(len(_SCREAMING_SNAKE_CASE ) ) ) ) for row in range(len(_SCREAMING_SNAKE_CASE ) ): if tensor.dtype != torch.long: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:.5f}" for x in tensor[row].cpu().data ) ) else: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:d}" for x in tensor[row].cpu().data ) ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ :Dict = model.config.num_hidden_layers, model.config.num_attention_heads lowerCAmelCase__ :Any = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) lowerCAmelCase__ :Tuple = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) if head_mask is None: lowerCAmelCase__ :Optional[int] = torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) head_mask.requires_grad_(requires_grad=_SCREAMING_SNAKE_CASE ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Any = 0.0 for step, inputs in enumerate(tqdm(_SCREAMING_SNAKE_CASE , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): lowerCAmelCase__ :str = tuple(t.to(args.device ) for t in inputs ) ((lowerCAmelCase__) , ) :Dict = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCAmelCase__ :str = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in 
enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = entropy(attn.detach() , _SCREAMING_SNAKE_CASE ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_SCREAMING_SNAKE_CASE ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: lowerCAmelCase__ :Union[str, Any] = 2 lowerCAmelCase__ :Tuple = torch.pow(torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: lowerCAmelCase__ :str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) logger.info('Head ranked by importance scores' ) lowerCAmelCase__ :List[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCAmelCase__ :List[Any] = torch.arange( head_importance.numel() , device=args.device ) lowerCAmelCase__ :int = head_ranks.view_as(_SCREAMING_SNAKE_CASE ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) return attn_entropy, head_importance, total_loss def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , _SCREAMING_SNAKE_CASE , original_score * args.masking_threshold ) lowerCAmelCase__ :Optional[int] = torch.ones_like(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCAmelCase__ :List[str] = original_score while current_score >= original_score * args.masking_threshold: lowerCAmelCase__ :List[str] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCAmelCase__ :str = float('Inf' ) lowerCAmelCase__ :List[str] = head_importance.view(-1 ).sort()[1] if len(_SCREAMING_SNAKE_CASE ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads lowerCAmelCase__ :int = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) lowerCAmelCase__ :Dict = new_head_mask.view(-1 ) lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Tuple = new_head_mask.view_as(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[int] = new_head_mask.clone().detach() print_ad_tensor(_SCREAMING_SNAKE_CASE ) # Compute metric and head importance again lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , _SCREAMING_SNAKE_CASE , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) 
logger.info('Final head mask' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time lowerCAmelCase__ :List[str] = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :List[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_SCREAMING_SNAKE_CASE ) ) } for k, v in heads_to_prune.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = [ v, ] assert sum(len(_SCREAMING_SNAKE_CASE ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :int = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , actually_pruned=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ :int = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(_SCREAMING_SNAKE_CASE , args.output_dir ) def __A () ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' 
, ) # Other parameters parser.add_argument( '--config_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=_SCREAMING_SNAKE_CASE , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=_SCREAMING_SNAKE_CASE , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=_SCREAMING_SNAKE_CASE , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=_SCREAMING_SNAKE_CASE , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=_SCREAMING_SNAKE_CASE , help='Batch size.' ) parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 ) parser.add_argument('--local_rank' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' 
) lowerCAmelCase__ :Any = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCAmelCase__ :List[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) lowerCAmelCase__ :Optional[int] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCAmelCase__ :Dict = torch.device('cuda' , args.local_rank ) lowerCAmelCase__ :Tuple = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCAmelCase__ :int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCAmelCase__ :Optional[Any] = nn.parallel.DistributedDataParallel( _SCREAMING_SNAKE_CASE , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_SCREAMING_SNAKE_CASE ) elif args.n_gpu > 1: lowerCAmelCase__ :Union[str, Any] = nn.DataParallel(_SCREAMING_SNAKE_CASE ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE ) # Prepare dataset lowerCAmelCase__ :Optional[int] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCAmelCase__ :Union[str, Any] = (torch.from_numpy(_SCREAMING_SNAKE_CASE ),) lowerCAmelCase__ :Optional[int] = TensorDataset(*_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = RandomSampler(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCAmelCase__ :Optional[Any] = mask_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) prune_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
293
1
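The heart of the pruning loop above is the step that zeroes out the num_to_mask least-important heads that are still active. A minimal, self-contained sketch of that one step (illustrative names, not the script's own helpers):

import torch


def mask_least_important_heads(head_importance: torch.Tensor, head_mask: torch.Tensor, num_to_mask: int) -> torch.Tensor:
    # Work on flat copies so the caller's tensors are untouched.
    importance = head_importance.clone().reshape(-1)
    flat_mask = head_mask.clone().reshape(-1)
    importance[flat_mask == 0.0] = float("inf")  # never re-select an already-masked head
    to_mask = importance.sort()[1][:num_to_mask]  # indices of the least important live heads
    flat_mask[to_mask] = 0.0
    return flat_mask.reshape(head_mask.shape)


# 2 layers x 4 heads; mask the 3 weakest heads.
imp = torch.tensor([[0.9, 0.1, 0.5, 0.2], [0.8, 0.05, 0.6, 0.3]])
print(mask_least_important_heads(imp, torch.ones_like(imp), 3))
# tensor([[1., 0., 1., 0.],
#         [1., 0., 1., 1.]])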
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { """SCUT-DLVCLab/lilt-roberta-en-base""": ( """https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json""" ), } class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Any = """lilt""" def __init__( self , __UpperCAmelCase=3_0_5_2_2 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=None , __UpperCAmelCase=4 , __UpperCAmelCase=1_0_2_4 , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ :Tuple = vocab_size lowerCAmelCase__ :Optional[int] = hidden_size lowerCAmelCase__ :Optional[Any] = num_hidden_layers lowerCAmelCase__ :str = num_attention_heads lowerCAmelCase__ :str = hidden_act lowerCAmelCase__ :Optional[int] = intermediate_size lowerCAmelCase__ :Dict = hidden_dropout_prob lowerCAmelCase__ :str = attention_probs_dropout_prob lowerCAmelCase__ :str = max_position_embeddings lowerCAmelCase__ :Optional[int] = type_vocab_size lowerCAmelCase__ :Union[str, Any] = initializer_range lowerCAmelCase__ :Any = layer_norm_eps lowerCAmelCase__ :List[Any] = position_embedding_type lowerCAmelCase__ :Optional[Any] = classifier_dropout lowerCAmelCase__ :Dict = channel_shrink_ratio lowerCAmelCase__ :Dict = max_ad_position_embeddings
293
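Assuming the class above is the stock LiltConfig from transformers, instantiating it, and a randomly initialised LiltModel from it, looks like this:

from transformers import LiltConfig, LiltModel

config = LiltConfig(hidden_size=768, num_hidden_layers=12, channel_shrink_ratio=4)
model = LiltModel(config)  # random weights, architecture driven by the config
print(config.max_2d_position_embeddings)  # 1024 by default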
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = 1_0 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [1, 2, 3, 4] lowerCAmelCase__ :Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = '' lowerCAmelCase__ , lowerCAmelCase__ :Any = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) lowerCAmelCase__ , lowerCAmelCase__ :str = process_story(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[str] = ['It was the best of times.'] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = torch.tensor([1, 2, 3, 4] ) lowerCAmelCase__ :List[str] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 0 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowerCAmelCase__ :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 2_3 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowerCAmelCase__ :Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 1 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 1_0_1 lowerCAmelCase__ :str = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowerCAmelCase__ :Any = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 
1, 1]] ) lowerCAmelCase__ :List[Any] = compute_token_type_ids(__UpperCAmelCase , __UpperCAmelCase ) np.testing.assert_array_equal(__UpperCAmelCase , __UpperCAmelCase )
293
1
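The tests above pin down truncate_or_pad completely: truncate to block_size when the sequence is too long, right-pad with the pad id when it is too short. A sketch consistent with them (not the tested module itself):

def truncate_or_pad(sequence, block_size, pad_token_id):
    # Truncate over-long sequences, right-pad short ones to exactly block_size.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))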
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart __A = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } __A = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def __A () ->List[Any]: """simple docstring""" lowerCAmelCase__ :Dict = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowerCAmelCase__ :Union[str, Any] = bs[:] lowerCAmelCase__ :Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 lowerCAmelCase__ :List[str] = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __A (_SCREAMING_SNAKE_CASE ) ->Tuple: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = set() lowerCAmelCase__ :str = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ :Optional[int] = char return pairs class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Optional[int] = VOCAB_FILES_NAMES __magic_name__ :List[Any] = PRETRAINED_VOCAB_FILES_MAP __magic_name__ :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ :List[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token lowerCAmelCase__ :int = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token lowerCAmelCase__ :Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token lowerCAmelCase__ :str = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token lowerCAmelCase__ :Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token lowerCAmelCase__ :Optional[int] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase__ :Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) with open(__UpperCAmelCase , encoding='utf-8' ) as vocab_handle: lowerCAmelCase__ :int = json.load(__UpperCAmelCase ) lowerCAmelCase__ :Dict = {v: k for k, v in self.encoder.items()} lowerCAmelCase__ :List[Any] = errors # how to handle errors in decoding lowerCAmelCase__ :Optional[int] = bytes_to_unicode() lowerCAmelCase__ :Tuple = {v: k for k, v in self.byte_encoder.items()} with open(__UpperCAmelCase , encoding='utf-8' ) as merges_handle: lowerCAmelCase__ :List[Any] = merges_handle.read().split('\n' )[1:-1] lowerCAmelCase__ :List[Any] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase__ :Any = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ :Union[str, Any] = {} lowerCAmelCase__ :List[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase__ :str = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def snake_case ( self ): '''simple docstring''' return len(self.encoder ) def snake_case ( self ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCAmelCase__ :List[Any] = tuple(__UpperCAmelCase ) lowerCAmelCase__ :Dict = get_pairs(__UpperCAmelCase ) if not pairs: return token while True: lowerCAmelCase__ :str = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ , lowerCAmelCase__ :List[str] = bigram lowerCAmelCase__ :Dict = [] lowerCAmelCase__ :Optional[Any] = 0 while i < len(__UpperCAmelCase ): try: lowerCAmelCase__ :List[Any] = word.index(__UpperCAmelCase , __UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ :Dict = j if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ :Union[str, Any] = tuple(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = new_word if len(__UpperCAmelCase ) == 1: break else: lowerCAmelCase__ :Dict = get_pairs(__UpperCAmelCase ) lowerCAmelCase__ :str = ' '.join(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = word return word def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = [] for token in re.findall(self.pat , __UpperCAmelCase ): lowerCAmelCase__ :Dict = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase ).split(' ' ) ) return bpe_tokens def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case 
( self , __UpperCAmelCase ): '''simple docstring''' return self.decoder.get(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = ''.join(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase__ :Optional[int] = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase__ :Optional[int] = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + '\n' ) lowerCAmelCase__ :Optional[int] = 0 with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." ' Please check that the tokenizer is not corrupted!' ) lowerCAmelCase__ :Union[str, Any] = token_index writer.write(' '.join(__UpperCAmelCase ) + '\n' ) index += 1 return vocab_file, merge_file def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase__ :List[str] = [self.cls_token_id] lowerCAmelCase__ :str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = [self.sep_token_id] lowerCAmelCase__ :Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase ) > 0 and not text[0].isspace()): lowerCAmelCase__ :Any = ' ' + text return (text, kwargs)
293
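A deobfuscated rendering of the get_pairs helper defined above, to make the BPE merge candidates concrete: it returns the set of adjacent symbol pairs that the merge table is consulted for.

def get_pairs(word):
    # `word` is a tuple of symbols; collect every adjacent pair.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


print(sorted(get_pairs(("l", "o", "w", "e", "r"))))
# [('e', 'r'), ('l', 'o'), ('o', 'w'), ('w', 'e')]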
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 'hf-internal-testing/tiny-random-t5' lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Any = tokenizer('This is me' , return_tensors='pt' ) lowerCAmelCase__ :Dict = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) lowerCAmelCase__ :Optional[Any] = model.generate(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Any = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) lowerCAmelCase__ :Union[str, Any] = model_reloaded.generate(**__UpperCAmelCase ) self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = 'hf-internal-testing/tiny-random-t5' lowerCAmelCase__ :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__UpperCAmelCase ): model.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = model.reverse_bettertransformer() model.save_pretrained(__UpperCAmelCase )
293
1
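What the test above exercises, written as a plain usage sketch (needs transformers plus optimum installed; the tiny checkpoint name is the one the test itself uses):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

model = model.to_bettertransformer()       # swap in fused attention modules
output = model.generate(**tokenizer("This is me", return_tensors="pt"))
model = model.reverse_bettertransformer()  # restore vanilla modules before save_pretrained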
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=a ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) __magic_name__ :ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} ) __magic_name__ :ClassVar[Features] = Features( { """answers""": Sequence( { """text""": Value("""string""" ), """answer_start""": Value("""int32""" ), } ) } ) __magic_name__ :str = "question" __magic_name__ :str = "context" __magic_name__ :str = "answers" @property def snake_case ( self ): '''simple docstring''' return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
293
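Assuming the class above is datasets' QuestionAnsweringExtractive task template (where the property rendered as snake_case is really named column_mapping), typical use looks like this:

from datasets.tasks import QuestionAnsweringExtractive

template = QuestionAnsweringExtractive(question_column="q", context_column="ctx")
print(template.column_mapping)
# {'q': 'question', 'ctx': 'context', 'answers': 'answers'}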
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] __A = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] __A = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): __A = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
1
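The _LazyModule wiring above means nothing heavy is imported until a symbol is first touched; an ordinary import then resolves through it transparently (assuming a torch-enabled transformers install):

from transformers import Data2VecTextConfig, Data2VecTextModel  # resolved lazily on first access

config = Data2VecTextConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
model = Data2VecTextModel(config)  # small randomly initialised model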
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase : """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=2_4 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=1_0_0_0 , ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = parent lowerCAmelCase__ :Any = batch_size lowerCAmelCase__ :Union[str, Any] = seq_length lowerCAmelCase__ :Union[str, Any] = is_training lowerCAmelCase__ :Union[str, Any] = use_input_mask lowerCAmelCase__ :Dict = use_token_type_ids lowerCAmelCase__ :Optional[Any] = use_labels lowerCAmelCase__ :Tuple = vocab_size lowerCAmelCase__ :Any = hidden_size lowerCAmelCase__ :Dict = num_hidden_layers lowerCAmelCase__ :str = num_attention_heads lowerCAmelCase__ :Optional[int] = intermediate_size lowerCAmelCase__ :Optional[Any] = hidden_act lowerCAmelCase__ :Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ :List[Any] = attention_probs_dropout_prob lowerCAmelCase__ :str = max_position_embeddings lowerCAmelCase__ :Optional[Any] = type_vocab_size lowerCAmelCase__ :Tuple = type_sequence_label_size lowerCAmelCase__ :Union[str, Any] = initializer_range lowerCAmelCase__ :str = num_labels lowerCAmelCase__ :int = scope lowerCAmelCase__ :str = range_bbox def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase__ :Union[str, Any] = bbox[i, j, 3] lowerCAmelCase__ :Optional[Any] = bbox[i, j, 1] lowerCAmelCase__ :Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase__ :Tuple = bbox[i, j, 2] lowerCAmelCase__ :Tuple = bbox[i, j, 0] lowerCAmelCase__ :Tuple = t lowerCAmelCase__ :str = None if self.use_input_mask: lowerCAmelCase__ :Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowerCAmelCase__ :Dict = None if self.use_token_type_ids: lowerCAmelCase__ :str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ :Any = None lowerCAmelCase__ :int = None if self.use_labels: lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ :List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, 
sequence_labels, token_labels def snake_case ( self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :List[Any] = LiltModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ :int = model(__UpperCAmelCase , bbox=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :List[Any] = self.num_labels lowerCAmelCase__ :Optional[int] = LiltForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Optional[int] = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :List[Any] = LiltForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Optional[int] = model( __UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) :Tuple = config_and_inputs lowerCAmelCase__ :Optional[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class _lowerCAmelCase ( a , a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :Optional[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ :Union[str, Any] = ( { """feature-extraction""": LiltModel, 
"""question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ :Tuple = False __magic_name__ :str = False def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return True def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = LiltModelTester(self ) lowerCAmelCase__ :List[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 ) def snake_case ( self ): '''simple docstring''' self.config_tester.run_common_tests() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase__ :str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ :Optional[Any] = LiltModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_torch @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(__UpperCAmelCase ) lowerCAmelCase__ :str = torch.tensor([[1, 2]] , device=__UpperCAmelCase ) lowerCAmelCase__ :Any = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ :Dict = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = torch.Size([1, 2, 7_6_8] ) lowerCAmelCase__ :Optional[Any] = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=__UpperCAmelCase , ) self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3 ) )
293
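The per-element loop that legalises bbox in the tester above (enforcing x0 <= x1 and y0 <= y1 by swapping coordinates) can also be written vectorised; a sketch of the equivalent:

import torch

bbox = torch.randint(0, 1000, (2, 7, 4))
x0, y0, x1, y1 = bbox.unbind(-1)
bbox = torch.stack(
    [torch.minimum(x0, x1), torch.minimum(y0, y1), torch.maximum(x0, x1), torch.maximum(y0, y1)], dim=-1
)
assert bool((bbox[..., 2] >= bbox[..., 0]).all()) and bool((bbox[..., 3] >= bbox[..., 1]).all())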
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowerCAmelCase ( yaml.SafeLoader ): """simple docstring""" def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase__ :str = [tuple(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else key for key in keys] lowerCAmelCase__ :Optional[int] = Counter(__UpperCAmelCase ) lowerCAmelCase__ :int = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().construct_mapping(__UpperCAmelCase , deep=__UpperCAmelCase ) self._check_no_duplicates_on_constructed_node(__UpperCAmelCase ) return mapping def __A (_SCREAMING_SNAKE_CASE ) ->Tuple[Optional[str], str]: """simple docstring""" lowerCAmelCase__ :Optional[Any] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase__ :Optional[int] = full_content[1:].index('---' ) + 1 lowerCAmelCase__ :Union[str, Any] = '\n'.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :List[str] = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(__UpperCAmelCase ) else: return cls() def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path.exists(): with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ :Optional[Any] = readme_file.read() else: lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :Union[str, Any] = self._to_readme(__UpperCAmelCase ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as readme_file: readme_file.write(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase = None ): '''simple docstring''' if readme_content is not None: lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = _split_yaml_from_readme(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content else: lowerCAmelCase__ :str = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = yaml.load(__UpperCAmelCase , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase__ :int = { (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return yaml.safe_dump( { (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=__UpperCAmelCase , allow_unicode=__UpperCAmelCase , encoding='utf-8' , ).decode('utf-8' ) __A = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], 
"""automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser __A = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") __A = ap.parse_args() __A = Path(args.readme_filepath) __A = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
293
1
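_split_yaml_from_readme above slices the YAML front matter off a README; the core index arithmetic, worked through on a tiny example:

readme = "---\nlicense: mit\n---\n# My dataset\n"
lines = readme.splitlines()            # ['---', 'license: mit', '---', '# My dataset']
sep_idx = lines[1:].index("---") + 1   # -> 2, the position of the closing '---'
yaml_block = "\n".join(lines[1:sep_idx])
body = "\n".join(lines[sep_idx + 1 :])
assert yaml_block == "license: mit" and body == "# My dataset"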
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""OwlViTFeatureExtractor"""] __A = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
293
1
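Why the XOR test above works: in two's complement the sign lives in the most significant bit, so a ^ b is negative exactly when one operand is negative and the other is not. Inlined for clarity:

def different_signs(numa: int, numb: int) -> bool:
    # Negative result means the sign bits differed.
    return numa ^ numb < 0


assert different_signs(1, -1)
assert not different_signs(-5, -7)
assert not different_signs(3, 4)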
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} __A = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""", }, } __A = { """camembert-base""": 512, } __A = """▁""" class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Optional[Any] = VOCAB_FILES_NAMES __magic_name__ :Optional[int] = PRETRAINED_VOCAB_FILES_MAP __magic_name__ :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ :Tuple = ["""input_ids""", """attention_mask"""] __magic_name__ :int = CamembertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) lowerCAmelCase__ :Optional[int] = vocab_file lowerCAmelCase__ :List[Any] = False if not self.vocab_file else True def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase__ :Optional[Any] = [self.cls_token_id] lowerCAmelCase__ :Optional[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [self.sep_token_id] lowerCAmelCase__ :Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(__UpperCAmelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase__ :Dict = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
293
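The pair layout implemented by build_inputs_with_special_tokens above is the RoBERTa-style "<s> A </s></s> B </s>". Assuming the class is the stock CamembertTokenizerFast, this can be checked directly (downloads the checkpoint):

from transformers import CamembertTokenizerFast

tok = CamembertTokenizerFast.from_pretrained("camembert-base")
ids = tok.build_inputs_with_special_tokens([100, 101], [102])
assert ids == [tok.cls_token_id, 100, 101, tok.sep_token_id, tok.sep_token_id, 102, tok.sep_token_id]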
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __A = logging.getLogger(__name__) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase=-1 ): '''simple docstring''' lowerCAmelCase__ :Dict = label_idx def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Optional[Any] = mode.value lowerCAmelCase__ :List[str] = os.path.join(__UpperCAmelCase , F"{mode}.txt" ) lowerCAmelCase__ :List[str] = 1 lowerCAmelCase__ :Union[str, Any] = [] with open(__UpperCAmelCase , encoding='utf-8' ) as f: lowerCAmelCase__ :str = [] lowerCAmelCase__ :Dict = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 lowerCAmelCase__ :Tuple = [] lowerCAmelCase__ :List[str] = [] else: lowerCAmelCase__ :List[str] = line.split(' ' ) words.append(splits[0] ) if len(__UpperCAmelCase ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) return examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(__UpperCAmelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowerCAmelCase__ :Optional[Any] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(__UpperCAmelCase ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' 
, line.split()[0] ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: lowerCAmelCase__ :Any = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ :Union[str, Any] = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(label_idx=-2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: lowerCAmelCase__ :str = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ :Optional[Any] = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _lowerCAmelCase ( a ): """simple docstring""" def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = mode.value lowerCAmelCase__ :Union[str, Any] = os.path.join(__UpperCAmelCase , F"{mode}.txt" ) lowerCAmelCase__ :Any = 1 lowerCAmelCase__ :Optional[Any] = [] with open(__UpperCAmelCase , encoding='utf-8' ) as f: for sentence in parse_incr(__UpperCAmelCase ): lowerCAmelCase__ :Dict = [] lowerCAmelCase__ :Dict = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 return examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = 0 for sentence in parse_incr(__UpperCAmelCase ): lowerCAmelCase__ :Optional[int] = preds_list[example_id] lowerCAmelCase__ :Tuple = '' for token in sentence: out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(__UpperCAmelCase ) example_id += 1 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
293
1
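A stripped-down version of the CoNLL reading loop above, showing how lines are grouped into (words, labels) examples at blank lines and -DOCSTART- markers:

def read_conll(lines, label_idx=-1):
    examples, words, labels = [], [], []
    for line in lines:
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if words:  # flush the sentence collected so far
                examples.append((words, labels))
                words, labels = [], []
        else:
            splits = line.split(" ")
            words.append(splits[0])
            # Test files may carry no label column; default to "O".
            labels.append(splits[label_idx].rstrip("\n") if len(splits) > 1 else "O")
    if words:
        examples.append((words, labels))
    return examples


print(read_conll(["EU B-ORG\n", "rejects O\n", "\n", "German B-MISC\n"]))
# [(['EU', 'rejects'], ['B-ORG', 'O']), (['German'], ['B-MISC'])]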
"""simple docstring""" import torch from diffusers import StableDiffusionPipeline __A = """path-to-your-trained-model""" __A = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""") __A = """A photo of sks dog in a bucket""" __A = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("""dog-bucket.png""")
"""simple docstring""" from __future__ import annotations __A = tuple[int, int, int] __A = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase __A = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" # -------------------------- default selection -------------------------- # rotors -------------------------- __A = """EGZWVONAHDCLFQMSIPJBYUKXTR""" __A = """FOBHMDKEXQNRAULPGSJVTYICZW""" __A = """ZJXESIUQLHAVRMDOYGTNFWPBKC""" # reflector -------------------------- __A = { """A""": """N""", """N""": """A""", """B""": """O""", """O""": """B""", """C""": """P""", """P""": """C""", """D""": """Q""", """Q""": """D""", """E""": """R""", """R""": """E""", """F""": """S""", """S""": """F""", """G""": """T""", """T""": """G""", """H""": """U""", """U""": """H""", """I""": """V""", """V""": """I""", """J""": """W""", """W""": """J""", """K""": """X""", """X""": """K""", """L""": """Y""", """Y""": """L""", """M""": """Z""", """Z""": """M""", } # -------------------------- extra rotors -------------------------- __A = """RMDJXFUWGISLHVTCQNKYPBEZOA""" __A = """SGLCPQWZHKXAREONTFBVIYJUDM""" __A = """HVSICLTYKQUBXDWAJZOMFGPREN""" __A = """RZWQHFMVDBKICJLNTUXAGYPSOE""" __A = """LFKIJODBEGAMQPXVUHYSTCZRWN""" __A = """KOAEGVDHXPQZMLFTYWJNBRCIUS""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: """simple docstring""" if (unique_rotsel := len(set(_SCREAMING_SNAKE_CASE ) )) < 3: lowerCAmelCase__ :Union[str, Any] = F"Please use 3 unique rotors (not {unique_rotsel})" raise Exception(_SCREAMING_SNAKE_CASE ) # Checks if rotor positions are valid lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = rotpos if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Tuple = F"First rotor position is not within range of 1..26 ({rotorposa}" raise ValueError(_SCREAMING_SNAKE_CASE ) if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = F"Second rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_SCREAMING_SNAKE_CASE ) if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_SCREAMING_SNAKE_CASE ) # Validates string and returns dict lowerCAmelCase__ :int = _plugboard(_SCREAMING_SNAKE_CASE ) return rotpos, rotsel, pbdict def __A (_SCREAMING_SNAKE_CASE ) ->dict[str, str]: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :str = F"Plugboard setting isn't type string ({type(_SCREAMING_SNAKE_CASE )})" raise TypeError(_SCREAMING_SNAKE_CASE ) elif len(_SCREAMING_SNAKE_CASE ) % 2 != 0: lowerCAmelCase__ :str = F"Odd number of symbols ({len(_SCREAMING_SNAKE_CASE )})" raise Exception(_SCREAMING_SNAKE_CASE ) elif pbstring == "": return {} pbstring.replace(' ' , '' ) # Checks if all characters are unique lowerCAmelCase__ :Any = set() for i in pbstring: if i not in abc: lowerCAmelCase__ :Any = F"'{i}' not in list of symbols" raise Exception(_SCREAMING_SNAKE_CASE ) elif i in tmppbl: lowerCAmelCase__ :Dict = F"Duplicate symbol ({i})" raise Exception(_SCREAMING_SNAKE_CASE ) else: tmppbl.add(_SCREAMING_SNAKE_CASE ) del tmppbl # Created the dictionary lowerCAmelCase__ :List[Any] = {} for j in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 , 2 ): lowerCAmelCase__ :Optional[int] = pbstring[j + 1] lowerCAmelCase__ :Union[str, Any] = pbstring[j] return pb def __A (_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , _SCREAMING_SNAKE_CASE = "" , ) ->str: """simple docstring""" lowerCAmelCase__ :Tuple = text.upper() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = _validator( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , plugb.upper() ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = rotor_position lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 lowerCAmelCase__ :Dict = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: lowerCAmelCase__ :Dict = plugboard[symbol] # rotor ra -------------------------- lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :str = rotora[index % len(_SCREAMING_SNAKE_CASE )] # rotor rb -------------------------- lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :int = rotora[index % len(_SCREAMING_SNAKE_CASE )] # rotor rc -------------------------- lowerCAmelCase__ :str = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :Optional[Any] = rotora[index % len(_SCREAMING_SNAKE_CASE )] # reflector -------------------------- # this is the reason you don't need another machine to decipher lowerCAmelCase__ :str = reflector[symbol] # 2nd rotors lowerCAmelCase__ :Tuple = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] lowerCAmelCase__ :Optional[int] = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] lowerCAmelCase__ :Any = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] # 2nd plugboard if symbol in plugboard: lowerCAmelCase__ :Union[str, Any] = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :str = 0 rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :List[Any] = 0 rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(_SCREAMING_SNAKE_CASE ) return "".join(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = """This is my Python script that emulates the Enigma machine from WWII.""" __A = (1, 1, 1) __A = """pictures""" __A = (rotora, rotora, rotora) __A = enigma(message, rotor_pos, rotor_sel, pb) print("""Encrypted message:""", en) print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
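# --- Illustration (not part of the file above) ---------------------------------
# Why the same settings both encrypt and decrypt: the plugboard and reflector are
# involutions (symmetric a <-> b pairings), and applying an involution twice is
# the identity. A standalone sketch of the pairing table the plugboard validator
# above builds:
def make_involution(pairs: str) -> dict[str, str]:
    table: dict[str, str] = {}
    for a, b in zip(pairs[::2], pairs[1::2]):
        table[a], table[b] = b, a  # symmetric entries, like the reflector map
    return table


pb = make_involution("ABCD")  # A<->B, C<->D
text = "ABCDE"
once = "".join(pb.get(c, c) for c in text)
twice = "".join(pb.get(c, c) for c in once)
assert twice == text  # two passes through an involution restore the input
print(once, "->", twice)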
"""simple docstring""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __A = { """/attention/""": """/0/SelfAttention/""", """/self_attention/""": """/0/SelfAttention/""", """/encoder_decoder_attention/""": """/1/EncDecAttention/""", """value""": """v""", """query""": """q""", """key""": """k""", """out""": """o""", """pre_self_attention_layer_norm""": """0/layer_norm""", """pre_cross_attention_layer_norm""": """1/layer_norm""", """pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong """token_embedder""": """shared""", """encoder_norm""": """final_layer_norm""", """decoder_norm""": """final_layer_norm""", """relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""", """router/router_weights/w/""": """router/classifier/""", """roer/roer_weights/w/""": """router/classifier/""", """logits_dense""": """lm_head""", } def __A (_SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" lowerCAmelCase__ :List[str] = list(s_dict.keys() ) for key in keys: lowerCAmelCase__ :List[str] = r'.*/layers_(\d+)' lowerCAmelCase__ :Optional[int] = key if re.match(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = re.sub(r'layers_(\d+)' , r'block/\1/layer' , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = r'(encoder|decoder)\/' if re.match(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :List[Any] = re.match(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).groups() if groups[0] == "encoder": lowerCAmelCase__ :Tuple = re.sub(r'/mlp/' , r'/1/mlp/' , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = re.sub(r'/pre_mlp_layer_norm/' , r'/1/layer_norm/' , _SCREAMING_SNAKE_CASE ) elif groups[0] == "decoder": lowerCAmelCase__ :int = re.sub(r'/mlp/' , r'/2/mlp/' , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = re.sub(r'/pre_mlp_layer_norm/' , r'/2/layer_norm/' , _SCREAMING_SNAKE_CASE ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: lowerCAmelCase__ :Tuple = new_key.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(F"{key} -> {new_key}" ) lowerCAmelCase__ :List[str] = s_dict.pop(_SCREAMING_SNAKE_CASE ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: lowerCAmelCase__ :Optional[int] = s_dict[ 'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: lowerCAmelCase__ :Dict = s_dict[ 'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: lowerCAmelCase__ :Optional[Any] = s_dict[key].shape[0] lowerCAmelCase__ :Optional[int] = s_dict[key] for idx in range(_SCREAMING_SNAKE_CASE ): s_dict[key.replace('expert/' , F"experts/expert_{idx}/" )] = expert_weihts[idx] print(F"{key} -> {key.replace('expert/' , F'experts/expert_{idx}/' )}" ) s_dict.pop(_SCREAMING_SNAKE_CASE ) return s_dict __A = { """NUM_ENCODER_LAYERS""": """num_layers""", """NUM_DECODER_LAYERS""": """num_decoder_layers""", """NUM_HEADS""": """num_heads""", """HEAD_DIM""": """d_kv""", """EMBED_DIM""": """d_model""", """MLP_DIM""": """d_ff""", """NUM_SELECTED_EXPERTS""": """num_selected_experts""", """NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""", """NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""", """dense.MlpBlock.activations""": """feed_forward_proj""", } def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" import regex as re with open(_SCREAMING_SNAKE_CASE , 'r' ) as f: lowerCAmelCase__ :Tuple = f.read() lowerCAmelCase__ :Any = re.findall(r'(.*) = ([0-9.]*)' , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Tuple = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": lowerCAmelCase__ :int = float(_SCREAMING_SNAKE_CASE ) if '.' in value else int(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :int = re.findall(r'(.*activations) = \(\'(.*)\',\)' , _SCREAMING_SNAKE_CASE )[0] lowerCAmelCase__ :str = str(activation[1] ) lowerCAmelCase__ :Optional[Any] = num_experts lowerCAmelCase__ :Tuple = SwitchTransformersConfig(**_SCREAMING_SNAKE_CASE ) return config def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="./" , _SCREAMING_SNAKE_CASE=8 ) ->Tuple: """simple docstring""" print(F"Loading flax weights from : {flax_checkpoint_path}" ) lowerCAmelCase__ :Tuple = checkpoints.load_tax_checkpoint(_SCREAMING_SNAKE_CASE ) if gin_file is not None: lowerCAmelCase__ :int = convert_gin_to_config(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: lowerCAmelCase__ :Optional[int] = SwitchTransformersConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = SwitchTransformersForConditionalGeneration(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = flax_params['target'] lowerCAmelCase__ :Union[str, Any] = flatten_dict(_SCREAMING_SNAKE_CASE , sep='/' ) lowerCAmelCase__ :int = rename_keys(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Union[str, Any] = unflatten_dict(_SCREAMING_SNAKE_CASE , sep='/' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(F"Save PyTorch model to {pytorch_dump_path}" ) pt_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the""" """ model architecture. If not provided, a `gin_file` has to be provided.""" ), ) parser.add_argument( """--gin_file""", default=None, type=str, required=False, help="""Path to the gin config file. 
If not provided, a `config_file` has to be passed """, ) parser.add_argument( """--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model.""" ) parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""") __A = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_t5x_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
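# Illustrative invocation of the converter above (the script file name and all
# paths are placeholders, not verified artifacts):
#
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /tmp/switch-base-8/checkpoint_500000 \
#       --gin_file /tmp/switch-base-8/config.gin \
#       --pytorch_dump_folder_path /tmp/switch-base-8-pt \
#       --num_experts 8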
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def __A (_SCREAMING_SNAKE_CASE ) ->Tuple: """simple docstring""" lowerCAmelCase__ :List[str] = np.max(_outputs , axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Any = """sigmoid""" __magic_name__ :Optional[Any] = """softmax""" __magic_name__ :Optional[Any] = """none""" @add_end_docstrings( a , r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. """ , ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Union[str, Any] = False __magic_name__ :Dict = ClassificationFunction.NONE def __init__( self , **__UpperCAmelCase ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = tokenizer_kwargs lowerCAmelCase__ :List[Any] = {} if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None: lowerCAmelCase__ :List[Any] = self.model.config.return_all_scores if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or top_k is None: lowerCAmelCase__ :int = top_k lowerCAmelCase__ :Dict = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , __UpperCAmelCase , ) if return_all_scores: lowerCAmelCase__ :List[Any] = None else: lowerCAmelCase__ :Union[str, Any] = 1 if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowerCAmelCase__ :List[Any] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
lowerCAmelCase__ :Optional[Any] = 'top_k' not in kwargs if isinstance(args[0] , __UpperCAmelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = self.framework if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.tokenizer(**__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1 and isinstance(inputs[0] , __UpperCAmelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.model(**__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowerCAmelCase__ :str = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowerCAmelCase__ :int = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None: lowerCAmelCase__ :Optional[Any] = self.model.config.function_to_apply else: lowerCAmelCase__ :Dict = ClassificationFunction.NONE lowerCAmelCase__ :int = model_outputs['logits'][0] lowerCAmelCase__ :Union[str, Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowerCAmelCase__ :Dict = sigmoid(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowerCAmelCase__ :int = softmax(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.NONE: lowerCAmelCase__ :Tuple = outputs else: raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowerCAmelCase__ :Any = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(__UpperCAmelCase ) ] if not _legacy: dict_scores.sort(key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase ) if top_k is not None: lowerCAmelCase__ :List[str] = dict_scores[:top_k] return dict_scores
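# --- Usage sketch (not part of the file above) ----------------------------------
# How this pipeline is typically driven. The checkpoint below is the stock SST-2
# sentiment model; any sequence-classification checkpoint would work, and the
# exact scores shown in comments are only indicative.
from transformers import pipeline

classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
print(classifier("I love this movie!"))          # e.g. [{'label': 'POSITIVE', 'score': 0.99...}]
print(classifier("Fine, I guess.", top_k=None))  # one dict per label, sorted by score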
"""simple docstring""" __A = { 0: """0""", 1: """1""", 2: """2""", 3: """3""", 4: """4""", 5: """5""", 6: """6""", 7: """7""", 8: """8""", 9: """9""", 10: """a""", 11: """b""", 12: """c""", 13: """d""", 14: """e""", 15: """f""", } def __A (_SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" assert type(_SCREAMING_SNAKE_CASE ) in (int, float) and decimal == int(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[int] = int(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = '' lowerCAmelCase__ :str = False if decimal < 0: lowerCAmelCase__ :Optional[Any] = True decimal *= -1 while decimal > 0: lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = divmod(_SCREAMING_SNAKE_CASE , 16 ) lowerCAmelCase__ :Union[str, Any] = values[remainder] + hexadecimal lowerCAmelCase__ :List[Any] = '0x' + hexadecimal if negative: lowerCAmelCase__ :List[Any] = '-' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float: """simple docstring""" if discount_rate < 0: raise ValueError('Discount rate cannot be negative' ) if not cash_flows: raise ValueError('Cash flows list cannot be empty' ) lowerCAmelCase__ :Union[str, Any] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_SCREAMING_SNAKE_CASE ) ) return round(_SCREAMING_SNAKE_CASE , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = {"""vocab_file""": """spiece.model"""} __A = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", } } __A = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } __A = """▁""" class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :int = VOCAB_FILES_NAMES __magic_name__ :Any = PRETRAINED_VOCAB_FILES_MAP __magic_name__ :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = ( AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token ) lowerCAmelCase__ :Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) lowerCAmelCase__ :Tuple = do_lower_case lowerCAmelCase__ :Tuple = remove_space lowerCAmelCase__ :Optional[Any] = keep_accents lowerCAmelCase__ :List[str] = vocab_file lowerCAmelCase__ :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def snake_case ( self ): '''simple docstring''' return len(self.sp_model ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): '''simple docstring''' lowerCAmelCase__ :Dict = self.__dict__.copy() lowerCAmelCase__ :Tuple = None return state def __setstate__( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' 
): lowerCAmelCase__ :Tuple = {} lowerCAmelCase__ :List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if self.remove_space: lowerCAmelCase__ :Union[str, Any] = ' '.join(inputs.strip().split() ) else: lowerCAmelCase__ :Union[str, Any] = inputs lowerCAmelCase__ :List[str] = outputs.replace('``' , '"' ).replace('\'\'' , '"' ) if not self.keep_accents: lowerCAmelCase__ :Tuple = unicodedata.normalize('NFKD' , __UpperCAmelCase ) lowerCAmelCase__ :Any = ''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: lowerCAmelCase__ :Dict = outputs.lower() return outputs def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = self.preprocess_text(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) lowerCAmelCase__ :int = [] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): lowerCAmelCase__ :Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCAmelCase__ :Dict = cur_pieces[1:] else: lowerCAmelCase__ :str = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.sp_model.PieceToId(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.sp_model.IdToPiece(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = [] lowerCAmelCase__ :Dict = '' lowerCAmelCase__ :str = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token lowerCAmelCase__ :List[str] = True lowerCAmelCase__ :int = [] else: current_sub_tokens.append(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string.strip() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :int = [self.sep_token_id] lowerCAmelCase__ :List[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :Dict = [self.sep_token_id] lowerCAmelCase__ :Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not 
os.path.isdir(__UpperCAmelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase__ :List[str] = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , 'wb' ) as fi: lowerCAmelCase__ :Tuple = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
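# --- Usage sketch (not part of the file above) ---------------------------------
# SentencePiece tokenization with the public checkpoint; pieces carry the "▁"
# word-boundary marker defined above, and encode() wraps the ids in [CLS]/[SEP].
# The printed pieces are indicative, not guaranteed verbatim.
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
print(tok.tokenize("Hello, world!"))  # e.g. ['▁hello', ',', '▁world', '!']
print(tok.encode("Hello, world!"))    # ids for [CLS] ... [SEP]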
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""OwlViTFeatureExtractor"""] __A = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def snake_case ( *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' pass def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __A = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __magic_name__ :str = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = pipeline( 'document-question-answering' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :Dict = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) lowerCAmelCase__ :List[Any] = 'What is the placebo?' lowerCAmelCase__ :Dict = [ { 'image': load_image(__UpperCAmelCase ), 'question': question, }, { 'image': image, 'question': question, }, { 'image': image, 'question': question, 'word_boxes': word_boxes, }, ] return dqa_pipeline, examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = dqa_pipeline(__UpperCAmelCase , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )}, {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' ) lowerCAmelCase__ :Union[str, Any] = INVOICE_URL lowerCAmelCase__ :Tuple = 'How many cats are there?' lowerCAmelCase__ :List[str] = [ {'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9}, {'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0}, ] lowerCAmelCase__ :Any = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) lowerCAmelCase__ :Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably lowerCAmelCase__ :List[Any] = './tests/fixtures/tests_samples/COCO/000000039769.png' lowerCAmelCase__ :List[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) # We can optionnally pass directly the words and bounding boxes lowerCAmelCase__ :Dict = './tests/fixtures/tests_samples/COCO/000000039769.png' lowerCAmelCase__ :List[str] = [] lowerCAmelCase__ :int = [] lowerCAmelCase__ :List[str] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , ) lowerCAmelCase__ :str = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' lowerCAmelCase__ :Tuple = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :Union[str, Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :Dict = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , ) lowerCAmelCase__ :List[Any] = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' 
lowerCAmelCase__ :Optional[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :int = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , ) lowerCAmelCase__ :List[str] = INVOICE_URL lowerCAmelCase__ :Any = 'What is the invoice number?' lowerCAmelCase__ :List[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) lowerCAmelCase__ :Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] ] * 2 , ) lowerCAmelCase__ :Dict = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) # This model should also work if `image` is set to None lowerCAmelCase__ :Tuple = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) @slow @require_torch @require_pytesseract @require_vision def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , max_seq_len=5_0 , ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' 
lowerCAmelCase__ :List[str] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] ] * 2 , ) lowerCAmelCase__ :Optional[Any] = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) # This model should also work if `image` is set to None lowerCAmelCase__ :List[str] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) @slow @require_torch def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = pipeline( 'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :str = 'What is the invoice number?' lowerCAmelCase__ :Tuple = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'answer': 'us-001'}] ) @require_tf @unittest.skip('Document question answering not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass
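# --- Usage sketch (not part of the file above) ---------------------------------
# Driving the same pipeline outside of a test. The checkpoint is one exercised in
# the tests above; "invoice.png" is a placeholder path for a local document image.
from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
print(dqa(image="invoice.png", question="What is the invoice number?", top_k=1))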
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :Tuple = StableDiffusionXLImgaImgPipeline __magic_name__ :List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} __magic_name__ :Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} __magic_name__ :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __magic_name__ :str = IMAGE_TO_IMAGE_IMAGE_PARAMS __magic_name__ :Any = IMAGE_TO_IMAGE_IMAGE_PARAMS def snake_case ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ :Optional[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , ) lowerCAmelCase__ :str = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , ) torch.manual_seed(0 ) lowerCAmelCase__ :str = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase__ :str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , ) lowerCAmelCase__ :int = CLIPTextModel(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase ) lowerCAmelCase__ :Any = CLIPTextModelWithProjection(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase ) lowerCAmelCase__ :str = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' lowerCAmelCase__ :Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith('mps' ): lowerCAmelCase__ 
:Optional[int] = torch.manual_seed(__UpperCAmelCase ) else: lowerCAmelCase__ :Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :int = self.get_dummy_components() lowerCAmelCase__ :List[str] = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :int = sd_pipe(**__UpperCAmelCase ).images lowerCAmelCase__ :int = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase__ :List[str] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def snake_case ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.get_dummy_components() lowerCAmelCase__ :str = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) # forward without prompt embeds lowerCAmelCase__ :int = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = 3 * ['this is a negative prompt'] lowerCAmelCase__ :Tuple = negative_prompt lowerCAmelCase__ :str = 3 * [inputs['prompt']] lowerCAmelCase__ :Optional[Any] = sd_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds lowerCAmelCase__ :Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = 3 * ['this is a negative prompt'] lowerCAmelCase__ :str = 3 * [inputs.pop('prompt' )] ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) :List[str] = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase ) lowerCAmelCase__ :str = sd_pipe( **__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , ) lowerCAmelCase__ :Optional[Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ): '''simple docstring''' lowerCAmelCase__ :Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Dict = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) ) 
lowerCAmelCase__ :Optional[int] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) lowerCAmelCase__ :int = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = self.get_inputs(__UpperCAmelCase ) lowerCAmelCase__ :int = pipe(**__UpperCAmelCase ).images lowerCAmelCase__ :Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase__ :List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
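# --- Illustration (not part of the file above) ---------------------------------
# How the `strength` value in the img2img inputs above maps onto denoising steps
# under the usual diffusers scheduling. This is a sketch of the idea, not the
# pipeline's exact private code; all names here are local to the sketch.
num_inference_steps, strength = 2, 0.75
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
print(f"skip the first {t_start} step(s), denoise for {init_timestep}")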
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""", } class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Tuple = """switch_transformers""" __magic_name__ :int = ["""past_key_values"""] __magic_name__ :List[Any] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , __UpperCAmelCase=3_2_1_2_8 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=6_4 , __UpperCAmelCase=2_0_4_8 , __UpperCAmelCase=6_4 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_2 , __UpperCAmelCase=8 , __UpperCAmelCase=False , __UpperCAmelCase=0.01 , __UpperCAmelCase="float32" , __UpperCAmelCase=False , __UpperCAmelCase=3_2 , __UpperCAmelCase=1_2_8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=0.0_01 , __UpperCAmelCase=0.0_01 , __UpperCAmelCase=1.0 , __UpperCAmelCase="relu" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = vocab_size lowerCAmelCase__ :Dict = d_model lowerCAmelCase__ :Union[str, Any] = d_kv lowerCAmelCase__ :Union[str, Any] = d_ff lowerCAmelCase__ :List[str] = num_sparse_encoder_layers lowerCAmelCase__ :Dict = num_layers lowerCAmelCase__ :str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase__ :List[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: lowerCAmelCase__ :int = self.num_layers // self.num_sparse_encoder_layers else: lowerCAmelCase__ :Any = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: lowerCAmelCase__ :int = self.num_decoder_layers // self.num_sparse_decoder_layers else: lowerCAmelCase__ :List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers lowerCAmelCase__ :Optional[int] = num_heads lowerCAmelCase__ :Union[str, Any] = num_experts lowerCAmelCase__ :Dict = expert_capacity lowerCAmelCase__ :str = router_bias lowerCAmelCase__ :int = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" ) lowerCAmelCase__ :Union[str, Any] = router_dtype lowerCAmelCase__ :List[Any] = router_ignore_padding_tokens lowerCAmelCase__ :str = relative_attention_num_buckets lowerCAmelCase__ :Dict = relative_attention_max_distance lowerCAmelCase__ :Optional[Any] = dropout_rate lowerCAmelCase__ :Optional[int] = layer_norm_epsilon lowerCAmelCase__ :List[str] = initializer_factor lowerCAmelCase__ :str = feed_forward_proj lowerCAmelCase__ :Union[str, Any] = use_cache lowerCAmelCase__ :Tuple = add_router_probs lowerCAmelCase__ :Dict = router_z_loss_coef lowerCAmelCase__ :Any = router_aux_loss_coef lowerCAmelCase__ :int = self.feed_forward_proj.split('-' ) lowerCAmelCase__ :List[Any] = act_info[-1] lowerCAmelCase__ :Dict = act_info[0] == 'gated' if len(__UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(__UpperCAmelCase ) > 2: raise ValueError( F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCAmelCase__ :List[str] = 'gelu_new' super().__init__( pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase , )
"""simple docstring""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" lowerCAmelCase__ :str = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(F"Building PyTorch model from configuration: {config}" ) lowerCAmelCase__ :int = BertForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""simple docstring""" from math import log from scipy.constants import Boltzmann, physical_constants __A = 300 # TEMPERATURE (unit = K) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->float: """simple docstring""" if donor_conc <= 0: raise ValueError('Donor concentration should be positive' ) elif acceptor_conc <= 0: raise ValueError('Acceptor concentration should be positive' ) elif intrinsic_conc <= 0: raise ValueError('Intrinsic concentration should be positive' ) elif donor_conc <= intrinsic_conc: raise ValueError( 'Donor concentration should be greater than intrinsic concentration' ) elif acceptor_conc <= intrinsic_conc: raise ValueError( 'Acceptor concentration should be greater than intrinsic concentration' ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[str] = XGLMTokenizer __magic_name__ :Any = XGLMTokenizerFast __magic_name__ :Dict = True __magic_name__ :Union[str, Any] = True def snake_case ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ :int = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = '<pad>' lowerCAmelCase__ :int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase__ :int = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase__ :Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) lowerCAmelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def snake_case ( self ): '''simple docstring''' return XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) def snake_case ( self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__UpperCAmelCase , f.name ) lowerCAmelCase__ :Dict = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = pickle.dumps(__UpperCAmelCase ) pickle.loads(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ :Optional[Any] = self.get_tokenizer() lowerCAmelCase__ :List[str] = self.get_rust_tokenizer() lowerCAmelCase__ :Optional[Any] = 'I was born in 92000, and this is falsé.' lowerCAmelCase__ :Dict = tokenizer.tokenize(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = rust_tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = self.get_rust_tokenizer() lowerCAmelCase__ :Dict = tokenizer.encode(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = rust_tokenizer.encode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = 'Hello World!' lowerCAmelCase__ :Tuple = [2, 3_1_2_2_7, 4_4_4_7, 3_5] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowerCAmelCase__ :List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5] # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = { 'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='facebook/xglm-564M' , padding=__UpperCAmelCase , )
"""simple docstring""" import math import qiskit def __A (_SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 1 ) ->qiskit.result.counts.Counts: """simple docstring""" if ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ): raise TypeError('inputs must be integers.' ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError('inputs must be positive.' ) if ( (math.floor(_SCREAMING_SNAKE_CASE ) != input_a) or (math.floor(_SCREAMING_SNAKE_CASE ) != input_a) or (math.floor(_SCREAMING_SNAKE_CASE ) != carry_in) ): raise ValueError('inputs must be exact integers.' ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError('inputs must be less or equal to 2.' ) # build registers lowerCAmelCase__ :List[str] = qiskit.QuantumRegister(4 , 'qr' ) lowerCAmelCase__ :List[str] = qiskit.ClassicalRegister(2 , 'cr' ) # list the entries lowerCAmelCase__ :str = [input_a, input_a, carry_in] lowerCAmelCase__ :Optional[int] = qiskit.QuantumCircuit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(_SCREAMING_SNAKE_CASE ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(_SCREAMING_SNAKE_CASE ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(_SCREAMING_SNAKE_CASE ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , _SCREAMING_SNAKE_CASE ) # measure the last two qbits lowerCAmelCase__ :Dict = qiskit.Aer.get_backend('aer_simulator' ) lowerCAmelCase__ :Dict = qiskit.execute(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , shots=1000 ) return job.result().get_counts(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __A = Lock() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]: """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowerCAmelCase__ :Any = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowerCAmelCase__ :Tuple = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowerCAmelCase__ :Optional[int] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowerCAmelCase__ :Optional[int] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # after all swaps are performed, send the values back to main result_pipe[1].send(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :str = [] lowerCAmelCase__ :Optional[Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowerCAmelCase__ :List[str] = Pipe() lowerCAmelCase__ :List[Any] = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowerCAmelCase__ :Dict = temp_rs lowerCAmelCase__ :Optional[Any] = temp_rr for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ): lowerCAmelCase__ :Union[str, Any] = Pipe() lowerCAmelCase__ :List[str] = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowerCAmelCase__ :Union[str, Any] = temp_rs lowerCAmelCase__ :Any = temp_rr process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=( len(_SCREAMING_SNAKE_CASE ) - 1, arr[len(_SCREAMING_SNAKE_CASE ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ): lowerCAmelCase__ :str = result_pipe[p][0].recv() process_array_[p].join() return arr def __A () ->List[Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = odd_even_transposition(_SCREAMING_SNAKE_CASE ) print('Sorted List\n' ) print(*_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase : """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_2 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=3_2 , __UpperCAmelCase=3_2 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0 , __UpperCAmelCase=None , ): '''simple docstring''' lowerCAmelCase__ :Dict = parent lowerCAmelCase__ :Union[str, Any] = batch_size lowerCAmelCase__ :List[Any] = seq_length lowerCAmelCase__ :Optional[int] = is_training lowerCAmelCase__ :Union[str, Any] = use_input_mask lowerCAmelCase__ :List[Any] = use_labels lowerCAmelCase__ :str = vocab_size lowerCAmelCase__ :Union[str, Any] = hidden_size lowerCAmelCase__ :List[Any] = projection_dim lowerCAmelCase__ :Optional[Any] = num_hidden_layers lowerCAmelCase__ :int = num_attention_heads lowerCAmelCase__ :Any = intermediate_size lowerCAmelCase__ :Any = dropout lowerCAmelCase__ :Any = attention_dropout lowerCAmelCase__ :List[Any] = max_position_embeddings lowerCAmelCase__ :Optional[int] = initializer_range lowerCAmelCase__ :int = scope lowerCAmelCase__ :Tuple = bos_token_id def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ :str = None if self.use_input_mask: lowerCAmelCase__ :Tuple = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: lowerCAmelCase__ :Union[str, Any] = input_mask.numpy() lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = input_mask.shape lowerCAmelCase__ :Optional[Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__UpperCAmelCase ): lowerCAmelCase__ :Dict = 1 lowerCAmelCase__ :Any = 0 lowerCAmelCase__ :Optional[Any] = self.get_config() return config, input_ids, tf.convert_to_tensor(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = TFBlipTextModel(config=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , training=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = config_and_inputs lowerCAmelCase__ :Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[Any] = (TFBlipTextModel,) if is_tf_available() else () __magic_name__ :int = False __magic_name__ :Dict = False __magic_name__ :Optional[Any] = False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = BlipTextModelTester(self ) lowerCAmelCase__ :Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 ) def snake_case ( self ): '''simple docstring''' self.config_tester.run_common_tests() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' pass @unittest.skip(reason='Blip does not use inputs_embeds' ) def snake_case ( self ): '''simple docstring''' pass @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' ) def snake_case ( self ): '''simple docstring''' pass @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' ) def snake_case ( self ): '''simple docstring''' pass @slow def snake_case ( self ): '''simple docstring''' for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ :List[Any] = TFBlipTextModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase=True ): '''simple docstring''' super().test_pt_tf_model_equivalence(allow_missing_keys=__UpperCAmelCase )
"""simple docstring""" from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(a ) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , **__UpperCAmelCase ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) requires_backends(self , 'vision' ) requires_backends(self , 'torch' ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) self.check_model_type(__UpperCAmelCase ) def snake_case ( self , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = {} lowerCAmelCase__ :Tuple = {} lowerCAmelCase__ :Any = {} # preprocess args if "points_per_batch" in kwargs: lowerCAmelCase__ :Dict = kwargs['points_per_batch'] if "points_per_crop" in kwargs: lowerCAmelCase__ :Union[str, Any] = kwargs['points_per_crop'] if "crops_n_layers" in kwargs: lowerCAmelCase__ :Any = kwargs['crops_n_layers'] if "crop_overlap_ratio" in kwargs: lowerCAmelCase__ :Any = kwargs['crop_overlap_ratio'] if "crop_n_points_downscale_factor" in kwargs: lowerCAmelCase__ :Dict = kwargs['crop_n_points_downscale_factor'] # postprocess args if "pred_iou_thresh" in kwargs: lowerCAmelCase__ :Tuple = kwargs['pred_iou_thresh'] if "stability_score_offset" in kwargs: lowerCAmelCase__ :Optional[int] = kwargs['stability_score_offset'] if "mask_threshold" in kwargs: lowerCAmelCase__ :List[Any] = kwargs['mask_threshold'] if "stability_score_thresh" in kwargs: lowerCAmelCase__ :Optional[Any] = kwargs['stability_score_thresh'] if "crops_nms_thresh" in kwargs: lowerCAmelCase__ :int = kwargs['crops_nms_thresh'] if "output_rle_mask" in kwargs: lowerCAmelCase__ :Union[str, Any] = kwargs['output_rle_mask'] if "output_bboxes_mask" in kwargs: lowerCAmelCase__ :Optional[Any] = kwargs['output_bboxes_mask'] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(__UpperCAmelCase , *__UpperCAmelCase , num_workers=__UpperCAmelCase , batch_size=__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=6_4 , __UpperCAmelCase = 0 , __UpperCAmelCase = 5_1_2 / 1_5_0_0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 1 , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = load_image(__UpperCAmelCase ) lowerCAmelCase__ :int = self.image_processor.size['longest_edge'] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = self.image_processor.generate_crop_boxes( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = self.image_processor(images=__UpperCAmelCase , return_tensors='pt' ) with self.device_placement(): if self.framework == "pt": lowerCAmelCase__ :Optional[int] = self.get_inference_context() with inference_context(): lowerCAmelCase__ :Any = self._ensure_tensor_on_device(__UpperCAmelCase , device=self.device ) lowerCAmelCase__ :Tuple = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) ) lowerCAmelCase__ :Optional[int] = image_embeddings lowerCAmelCase__ 
:List[Any] = grid_points.shape[1] lowerCAmelCase__ :Union[str, Any] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( 'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. ' 'To return all points at once, set points_per_batch to None' ) for i in range(0 , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Optional[Any] = grid_points[:, i : i + points_per_batch, :, :] lowerCAmelCase__ :List[str] = input_labels[:, i : i + points_per_batch] lowerCAmelCase__ :List[Any] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0.88 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , ): '''simple docstring''' lowerCAmelCase__ :Any = model_inputs.pop('input_boxes' ) lowerCAmelCase__ :Optional[int] = model_inputs.pop('is_last' ) lowerCAmelCase__ :Dict = model_inputs.pop('original_sizes' ).tolist() lowerCAmelCase__ :Dict = model_inputs.pop('reshaped_input_sizes' ).tolist() lowerCAmelCase__ :Optional[int] = self.model(**__UpperCAmelCase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCAmelCase__ :int = model_outputs['pred_masks'] lowerCAmelCase__ :Optional[Any] = self.image_processor.post_process_masks( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , binarize=__UpperCAmelCase ) lowerCAmelCase__ :Any = model_outputs['iou_scores'] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.7 , ): '''simple docstring''' lowerCAmelCase__ :Dict = [] lowerCAmelCase__ :Optional[Any] = [] lowerCAmelCase__ :int = [] for model_output in model_outputs: all_scores.append(model_output.pop('iou_scores' ) ) all_masks.extend(model_output.pop('masks' ) ) all_boxes.append(model_output.pop('boxes' ) ) lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase ) lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self.image_processor.post_process_for_mask_generation( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Tuple = defaultdict(__UpperCAmelCase ) for output in model_outputs: for k, v in output.items(): extra[k].append(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = {} if output_rle_mask: lowerCAmelCase__ :str = rle_mask if output_bboxes_mask: lowerCAmelCase__ :Optional[int] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring""" import numpy as np class _lowerCAmelCase : """simple docstring""" def __init__( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = (0, 0) lowerCAmelCase__ :Optional[int] = None lowerCAmelCase__ :List[str] = 0 lowerCAmelCase__ :str = 0 lowerCAmelCase__ :Dict = 0 def __eq__( self , __UpperCAmelCase ): '''simple docstring''' return self.position == cell.position def snake_case ( self ): '''simple docstring''' print(self.position ) class _lowerCAmelCase : """simple docstring""" def __init__( self , __UpperCAmelCase=(5, 5) ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = np.zeros(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = world_size[0] lowerCAmelCase__ :Dict = world_size[1] def snake_case ( self ): '''simple docstring''' print(self.w ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] lowerCAmelCase__ :List[Any] = cell.position[0] lowerCAmelCase__ :Union[str, Any] = cell.position[1] lowerCAmelCase__ :Union[str, Any] = [] for n in neughbour_cord: lowerCAmelCase__ :List[Any] = current_x + n[0] lowerCAmelCase__ :str = current_y + n[1] if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit: lowerCAmelCase__ :Optional[int] = Cell() lowerCAmelCase__ :Dict = (x, y) lowerCAmelCase__ :Optional[int] = cell neighbours.append(__UpperCAmelCase ) return neighbours def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]: """simple docstring""" lowerCAmelCase__ :List[str] = [] lowerCAmelCase__ :Optional[Any] = [] _open.append(_SCREAMING_SNAKE_CASE ) while _open: lowerCAmelCase__ :Tuple = np.argmin([n.f for n in _open] ) lowerCAmelCase__ :Optional[int] = _open[min_f] _closed.append(_open.pop(_SCREAMING_SNAKE_CASE ) ) if current == goal: break for n in world.get_neigbours(_SCREAMING_SNAKE_CASE ): for c in _closed: if c == n: continue lowerCAmelCase__ :Dict = current.g + 1 lowerCAmelCase__ , lowerCAmelCase__ :List[str] = n.position lowerCAmelCase__ , lowerCAmelCase__ :str = goal.position lowerCAmelCase__ :List[Any] = (ya - ya) ** 2 + (xa - xa) ** 2 lowerCAmelCase__ :Optional[Any] = n.h + n.g for c in _open: if c == n and c.f < n.f: continue _open.append(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Tuple = [] while current.parent is not None: path.append(current.position ) lowerCAmelCase__ :int = current.parent path.append(current.position ) return path[::-1] if __name__ == "__main__": __A = Gridworld() # Start position and goal __A = Cell() __A = (0, 0) __A = Cell() __A = (4, 4) print(F'''path from {start.position} to {goal.position}''') __A = astar(world, start, goal) # Just for visual reasons. for i in s: __A = 1 print(world.w)
"""simple docstring""" from __future__ import annotations __A = 1.6_021e-19 # units = C def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->tuple[str, float]: """simple docstring""" if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif conductivity < 0: raise ValueError('Conductivity cannot be negative' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative' ) elif mobility < 0: raise ValueError('mobility cannot be negative' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __A = """CompVis/stable-diffusion-v1-1""" __A = """CompVis/stable-diffusion-v1-2""" __A = """CompVis/stable-diffusion-v1-3""" __A = """CompVis/stable-diffusion-v1-4""" class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , ): '''simple docstring''' super()._init_() lowerCAmelCase__ :Tuple = StableDiffusionPipeline.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = StableDiffusionPipeline.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = StableDiffusionPipeline.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = StableDiffusionPipeline( vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , requires_safety_checker=__UpperCAmelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def snake_case ( self ): '''simple docstring''' return {k: getattr(self , __UpperCAmelCase ) for k in self.config.keys() if not k.startswith('_' )} def snake_case ( self , __UpperCAmelCase = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCAmelCase__ :str = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' self.enable_attention_slicing(__UpperCAmelCase ) @torch.no_grad() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_0 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , **__UpperCAmelCase , ): '''simple docstring''' return self.pipea( prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , ) @torch.no_grad() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_0 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , **__UpperCAmelCase , ): '''simple docstring''' return self.pipea( 
prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , ) @torch.no_grad() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_0 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , **__UpperCAmelCase , ): '''simple docstring''' return self.pipea( prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , ) @torch.no_grad() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_0 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , **__UpperCAmelCase , ): '''simple docstring''' return self.pipea( prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , ) @torch.no_grad() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_0 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu' self.to(__UpperCAmelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." 
) # Get first result from Stable Diffusion Checkpoint v1.1 lowerCAmelCase__ :List[Any] = self.textaimg_sda_a( prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 lowerCAmelCase__ :Optional[int] = self.textaimg_sda_a( prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 lowerCAmelCase__ :List[Any] = self.textaimg_sda_a( prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 lowerCAmelCase__ :Optional[Any] = self.textaimg_sda_a( prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=1_8 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , ): '''simple docstring''' lowerCAmelCase__ :Dict = size if size is not None else {'height': 1_8, 'width': 1_8} lowerCAmelCase__ :Tuple = parent lowerCAmelCase__ :List[Any] = batch_size lowerCAmelCase__ :List[Any] = num_channels lowerCAmelCase__ :Any = image_size lowerCAmelCase__ :int = min_resolution lowerCAmelCase__ :int = max_resolution lowerCAmelCase__ :Dict = do_resize lowerCAmelCase__ :str = size lowerCAmelCase__ :Any = apply_ocr def snake_case ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :str = LayoutLMvaImageProcessor if is_pytesseract_available() else None def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = LayoutLMvaImageProcessingTester(self ) @property def snake_case ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'apply_ocr' ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} ) lowerCAmelCase__ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} ) def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __UpperCAmelCase ) self.assertIsInstance(encoding.boxes , __UpperCAmelCase ) # Test batched lowerCAmelCase__ :Any = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCAmelCase__ :Optional[Any] = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCAmelCase__ :Any = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCAmelCase__ :Tuple = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) lowerCAmelCase__ :int = Image.open(ds[0]['file'] ).convert('RGB' ) lowerCAmelCase__ :Optional[int] = image_processing(__UpperCAmelCase , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCAmelCase__ :Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 
'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 lowerCAmelCase__ :List[str] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 
6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __UpperCAmelCase ) self.assertListEqual(encoding.boxes , __UpperCAmelCase ) # with apply_OCR = False lowerCAmelCase__ :int = LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = image_processing(__UpperCAmelCase , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Optional[int] = ["""image_processor""", """tokenizer"""] __magic_name__ :Union[str, Any] = """LayoutLMv3ImageProcessor""" __magic_name__ :str = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""") def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __UpperCAmelCase , ) lowerCAmelCase__ :Union[str, Any] = kwargs.pop('feature_extractor' ) lowerCAmelCase__ :Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' 
) # first, apply the image processor lowerCAmelCase__ :List[str] = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :str = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCAmelCase__ :Union[str, Any] = features['words'] lowerCAmelCase__ :str = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) # add pixel values lowerCAmelCase__ :List[str] = features.pop('pixel_values' ) if return_overflowing_tokens is True: lowerCAmelCase__ :Any = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] ) lowerCAmelCase__ :Optional[Any] = images return encoded_inputs def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__UpperCAmelCase ) != len(__UpperCAmelCase ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F" {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}" ) return images_with_overflow def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def snake_case ( self ): '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def snake_case ( self ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , ) return self.image_processor_class @property def snake_case ( self ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , ) return self.image_processor
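# [Editor's note] An illustrative, standalone sketch of the processor pattern the
# row above implements -- one object composing an image processor and a tokenizer,
# where __call__ runs the image step first and feeds its output to the text step.
# All names here are hypothetical, not the real transformers API.
class ToyProcessor:
    def __init__(self, image_processor, tokenizer):
        self.image_processor = image_processor
        self.tokenizer = tokenizer

    def __call__(self, image, text=None):
        features = self.image_processor(image)          # e.g. pixel values + OCR'd words
        encoding = self.tokenizer(text or features["words"])
        encoding["pixel_values"] = features["pixel_values"]  # merge both modalities
        return encoding

print(ToyProcessor(
    image_processor=lambda img: {"pixel_values": [len(img)], "words": img.split()},
    tokenizer=lambda words: {"input_ids": [hash(w) % 100 for w in words]},
)("a scanned receipt"))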
293
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""ReformerTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""ReformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ReformerAttention""", """ReformerForMaskedLM""", """ReformerForQuestionAnswering""", """ReformerForSequenceClassification""", """ReformerLayer""", """ReformerModel""", """ReformerModelWithLMHead""", """ReformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
1
"""simple docstring""" import argparse import os import re __A = """src/transformers/models/auto""" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __A = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""") # re pattern that matches identifiers in mappings __A = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""") def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->Any: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: lowerCAmelCase__ :Tuple = f.read() lowerCAmelCase__ :Optional[Any] = content.split('\n' ) lowerCAmelCase__ :Any = [] lowerCAmelCase__ :Tuple = 0 while line_idx < len(_SCREAMING_SNAKE_CASE ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowerCAmelCase__ :Optional[Any] = len(re.search(r'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(' ' * indent + '(' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowerCAmelCase__ :Any = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowerCAmelCase__ :List[str] = line_idx while not lines[line_idx].startswith(' ' * indent + ')' ): line_idx += 1 blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowerCAmelCase__ :List[Any] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : _re_identifier.search(_SCREAMING_SNAKE_CASE ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_SCREAMING_SNAKE_CASE ) ) elif "\n".join(_SCREAMING_SNAKE_CASE ) != content: return True def __A (_SCREAMING_SNAKE_CASE = False ) ->List[str]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for f in os.listdir(_SCREAMING_SNAKE_CASE ) if f.endswith('.py' )] lowerCAmelCase__ :List[str] = [sort_auto_mapping(_SCREAMING_SNAKE_CASE , overwrite=_SCREAMING_SNAKE_CASE ) for fname in fnames] if not overwrite and any(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = [f for f, d in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if d] raise ValueError( F"The following files have auto mappings that need sorting: {', '.join(_SCREAMING_SNAKE_CASE )}. Run `make style` to fix" ' this.' ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") __A = parser.parse_args() sort_all_auto_mappings(not args.check_only)
293
"""simple docstring""" import math def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be an integer" raise TypeError(_SCREAMING_SNAKE_CASE ) if number < 1: lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be > 0" raise ValueError(_SCREAMING_SNAKE_CASE ) elif number == 1: return 3 elif number == 2: return 5 else: lowerCAmelCase__ :Union[str, Any] = int(math.log(number // 3 , 2 ) ) + 2 lowerCAmelCase__ :Optional[Any] = [3, 5] lowerCAmelCase__ :Optional[Any] = 2 lowerCAmelCase__ :List[str] = 3 for block in range(1 , _SCREAMING_SNAKE_CASE ): for _ in range(_SCREAMING_SNAKE_CASE ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): __A = 0 try: __A = proth(number) except ValueError: print(F'''ValueError: there is no {number}th Proth number''') continue print(F'''The {number}th Proth number: {value}''')
293
1
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( """The `image_to_image.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionImg2ImgPipeline` instead.""" )
293
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __A = TypeVar("""KEY""") __A = TypeVar("""VAL""") @dataclass(frozen=a , slots=a ) class _lowerCAmelCase ( Generic[KEY, VAL] ): """simple docstring""" __magic_name__ :KEY __magic_name__ :VAL class _lowerCAmelCase ( _Item ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __bool__( self ): '''simple docstring''' return False __A = _DeletedItem() class _lowerCAmelCase ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.75 ): '''simple docstring''' lowerCAmelCase__ :List[str] = initial_block_size lowerCAmelCase__ :list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ :Tuple = capacity_factor lowerCAmelCase__ :str = 0 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return hash(__UpperCAmelCase ) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = self._buckets[ind] if not stored: lowerCAmelCase__ :Dict = _Item(__UpperCAmelCase , __UpperCAmelCase ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ :Optional[Any] = _Item(__UpperCAmelCase , __UpperCAmelCase ) return True else: return False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ :Optional[Any] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self._buckets lowerCAmelCase__ :Tuple = [None] * new_size lowerCAmelCase__ :List[Any] = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self._get_bucket_index(__UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ :Tuple = self._get_next_ind(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__UpperCAmelCase , __UpperCAmelCase ) def __delitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :int = self._buckets[ind] if item is None: raise KeyError(__UpperCAmelCase ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ :List[str] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :str = 
self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCAmelCase ) def __len__( self ): '''simple docstring''' return self._len def __iter__( self ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ' ,'.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return F"HashMap({val_string})"
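# [Editor's note] Standalone sketch of the probing scheme the map above relies on:
# on a collision, walk (index + 1) % capacity until a free slot turns up.
def probe_sequence(key, capacity, occupied):
    ind = hash(key) % capacity
    path = [ind]
    while ind in occupied:
        ind = (ind + 1) % capacity  # the _get_next_ind step
        path.append(ind)
    return path

print(probe_sequence("k", 8, occupied={hash("k") % 8}))  # first slot taken -> one extra hop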
293
1
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[str] = XGLMTokenizer __magic_name__ :Any = XGLMTokenizerFast __magic_name__ :Dict = True __magic_name__ :Union[str, Any] = True def snake_case ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ :int = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = '<pad>' lowerCAmelCase__ :int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase__ :int = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase__ :Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) lowerCAmelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def snake_case ( self ): '''simple docstring''' return XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) def snake_case ( self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__UpperCAmelCase , f.name ) lowerCAmelCase__ :Dict = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = pickle.dumps(__UpperCAmelCase ) pickle.loads(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ :Optional[Any] = self.get_tokenizer() lowerCAmelCase__ :List[str] = self.get_rust_tokenizer() lowerCAmelCase__ :Optional[Any] = 'I was born in 92000, and this is falsé.' lowerCAmelCase__ :Dict = tokenizer.tokenize(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = rust_tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = self.get_rust_tokenizer() lowerCAmelCase__ :Dict = tokenizer.encode(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = rust_tokenizer.encode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = 'Hello World!' lowerCAmelCase__ :Tuple = [2, 3_1_2_2_7, 4_4_4_7, 3_5] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowerCAmelCase__ :List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5] # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = { 'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='facebook/xglm-564M' , padding=__UpperCAmelCase , )
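# [Editor's note] Standalone illustration of the fairseq-offset convention the
# assertions above rely on: raw sentencepiece ids are shifted by a fixed offset
# to make room for control tokens at the start of the vocabulary.
fairseq_offset = 1
sp_ids = [285, 46, 10, 170, 382]  # sentencepiece ids for '▁This ▁is ▁a ▁t est'
print([i + fairseq_offset for i in sp_ids])  # the ids the tokenizer should emit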
293
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __A = logging.getLogger(__name__) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" if os.path.exists(_SCREAMING_SNAKE_CASE ): if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) else: os.makedirs(_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :Dict = 2 if unlogit: lowerCAmelCase__ :List[str] = torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :str = p * torch.log(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = 0 return -plogp.sum(dim=-1 ) def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" logger.info('lv, h >\t' + '\t'.join(F"{x + 1}" for x in range(len(_SCREAMING_SNAKE_CASE ) ) ) ) for row in range(len(_SCREAMING_SNAKE_CASE ) ): if tensor.dtype != torch.long: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:.5f}" for x in tensor[row].cpu().data ) ) else: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:d}" for x in tensor[row].cpu().data ) ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ :Dict = model.config.num_hidden_layers, model.config.num_attention_heads lowerCAmelCase__ :Any = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) lowerCAmelCase__ :Tuple = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) if head_mask is None: lowerCAmelCase__ :Optional[int] = torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) head_mask.requires_grad_(requires_grad=_SCREAMING_SNAKE_CASE ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Any = 0.0 for step, inputs in enumerate(tqdm(_SCREAMING_SNAKE_CASE , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): lowerCAmelCase__ :str = tuple(t.to(args.device ) for t in inputs ) ((lowerCAmelCase__) , ) :Dict = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCAmelCase__ :str = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in 
enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = entropy(attn.detach() , _SCREAMING_SNAKE_CASE ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_SCREAMING_SNAKE_CASE ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: lowerCAmelCase__ :Union[str, Any] = 2 lowerCAmelCase__ :Tuple = torch.pow(torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: lowerCAmelCase__ :str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) logger.info('Head ranked by importance scores' ) lowerCAmelCase__ :List[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCAmelCase__ :List[Any] = torch.arange( head_importance.numel() , device=args.device ) lowerCAmelCase__ :int = head_ranks.view_as(_SCREAMING_SNAKE_CASE ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) return attn_entropy, head_importance, total_loss def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , _SCREAMING_SNAKE_CASE , original_score * args.masking_threshold ) lowerCAmelCase__ :Optional[int] = torch.ones_like(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCAmelCase__ :List[str] = original_score while current_score >= original_score * args.masking_threshold: lowerCAmelCase__ :List[str] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCAmelCase__ :str = float('Inf' ) lowerCAmelCase__ :List[str] = head_importance.view(-1 ).sort()[1] if len(_SCREAMING_SNAKE_CASE ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads lowerCAmelCase__ :int = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) lowerCAmelCase__ :Dict = new_head_mask.view(-1 ) lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Tuple = new_head_mask.view_as(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[int] = new_head_mask.clone().detach() print_ad_tensor(_SCREAMING_SNAKE_CASE ) # Compute metric and head importance again lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , _SCREAMING_SNAKE_CASE , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) 
logger.info('Final head mask' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time lowerCAmelCase__ :List[str] = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :List[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_SCREAMING_SNAKE_CASE ) ) } for k, v in heads_to_prune.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = [ v, ] assert sum(len(_SCREAMING_SNAKE_CASE ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :int = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , actually_pruned=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ :int = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(_SCREAMING_SNAKE_CASE , args.output_dir ) def __A () ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' 
, ) # Other parameters parser.add_argument( '--config_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=_SCREAMING_SNAKE_CASE , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=_SCREAMING_SNAKE_CASE , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=_SCREAMING_SNAKE_CASE , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=_SCREAMING_SNAKE_CASE , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=_SCREAMING_SNAKE_CASE , help='Batch size.' ) parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 ) parser.add_argument('--local_rank' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' 
) lowerCAmelCase__ :Any = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCAmelCase__ :List[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) lowerCAmelCase__ :Optional[int] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCAmelCase__ :Dict = torch.device('cuda' , args.local_rank ) lowerCAmelCase__ :Tuple = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCAmelCase__ :int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCAmelCase__ :Optional[Any] = nn.parallel.DistributedDataParallel( _SCREAMING_SNAKE_CASE , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_SCREAMING_SNAKE_CASE ) elif args.n_gpu > 1: lowerCAmelCase__ :Union[str, Any] = nn.DataParallel(_SCREAMING_SNAKE_CASE ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE ) # Prepare dataset lowerCAmelCase__ :Optional[int] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCAmelCase__ :Union[str, Any] = (torch.from_numpy(_SCREAMING_SNAKE_CASE ),) lowerCAmelCase__ :Optional[int] = TensorDataset(*_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = RandomSampler(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCAmelCase__ :Optional[Any] = mask_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) prune_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
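# [Editor's sketch] The head-importance idea in the script above, reduced to a toy
# (assumes torch is installed): scale per-head contributions by a mask of ones,
# backprop a loss, and read |d loss / d mask| as each head's importance score.
import torch

head_outputs = torch.tensor([[1.0, 3.0], [2.0, 0.5]])  # toy per-layer, per-head contributions
mask = torch.ones(2, 2, requires_grad=True)
(head_outputs * mask).sum().backward()
print(mask.grad.abs())  # equals |head_outputs|: larger contribution -> more important head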
293
1
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError('Input must be an integer' ) if input_num <= 0: raise ValueError('Input must be positive' ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
293
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = 1_0 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [1, 2, 3, 4] lowerCAmelCase__ :Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = '' lowerCAmelCase__ , lowerCAmelCase__ :Any = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) lowerCAmelCase__ , lowerCAmelCase__ :str = process_story(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[str] = ['It was the best of times.'] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = torch.tensor([1, 2, 3, 4] ) lowerCAmelCase__ :List[str] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 0 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowerCAmelCase__ :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 2_3 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowerCAmelCase__ :Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 1 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 1_0_1 lowerCAmelCase__ :str = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowerCAmelCase__ :Any = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 
1, 1]] ) lowerCAmelCase__ :List[Any] = compute_token_type_ids(__UpperCAmelCase , __UpperCAmelCase ) np.testing.assert_array_equal(__UpperCAmelCase , __UpperCAmelCase )
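# [Editor's note] The truncate-or-pad contract these tests exercise, re-derived
# from the expected values above (the real helper lives in utils_summarization):
# clip to block_size, or right-pad with the pad token up to it.
def truncate_or_pad_sketch(seq, block_size, pad_token):
    return seq[:block_size] + [pad_token] * max(0, block_size - len(seq))

print(truncate_or_pad_sketch([1, 2, 3, 4], 10, 0))        # [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
print(truncate_or_pad_sketch(list(range(1, 14)), 10, 0))  # [1, 2, ..., 10]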
293
1
"""simple docstring""" import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @parameterized.expand([(None,), ('foo.json',)] ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = GenerationConfig( do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__UpperCAmelCase , config_name=__UpperCAmelCase ) lowerCAmelCase__ :Dict = GenerationConfig.from_pretrained(__UpperCAmelCase , config_name=__UpperCAmelCase ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __UpperCAmelCase ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0 ) self.assertEqual(loaded_config.max_length , 2_0 ) self.assertEqual(loaded_config.max_time , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = AutoConfig.from_pretrained('gpt2' ) lowerCAmelCase__ :Dict = GenerationConfig.from_model_config(__UpperCAmelCase ) lowerCAmelCase__ :Any = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = GenerationConfig() lowerCAmelCase__ :List[Any] = { 'max_new_tokens': 1_0_2_4, 'foo': 'bar', } lowerCAmelCase__ :Union[str, Any] = copy.deepcopy(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = generation_config.update(**__UpperCAmelCase ) # update_kwargs was not modified (no side effects) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__UpperCAmelCase , {'foo': 'bar'} ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = GenerationConfig() lowerCAmelCase__ :Optional[Any] = 'bar' with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir: generation_config.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Dict = GenerationConfig.from_pretrained(__UpperCAmelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , 'bar' ) lowerCAmelCase__ :str = GenerationConfig.from_model_config(__UpperCAmelCase ) assert not hasattr(__UpperCAmelCase , 'foo' ) # no new kwargs should be initialized if from config def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , __UpperCAmelCase ) 
self.assertEqual(default_config.num_beams , 1 ) lowerCAmelCase__ :Optional[Any] = GenerationConfig( do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , __UpperCAmelCase ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = GenerationConfig.from_pretrained(__UpperCAmelCase , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , __UpperCAmelCase ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @classmethod def snake_case ( cls ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = TOKEN HfFolder.save_token(__UpperCAmelCase ) @classmethod def snake_case ( cls ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='test-generation-config' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' ) except HTTPError: pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = GenerationConfig( do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('test-generation-config' , use_auth_token=self._token ) lowerCAmelCase__ :Optional[int] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='test-generation-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __UpperCAmelCase , repo_id='test-generation-config' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ :List[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = GenerationConfig( do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token ) lowerCAmelCase__ :int = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __UpperCAmelCase , repo_id='valid_org/test-generation-config-org' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ :List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
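# [Editor's sketch] The `update` contract tested above, in miniature: known keys
# are applied to the config object, unknown ones are handed back to the caller.
class ToyConfig:
    def __init__(self):
        self.max_new_tokens = 20

    def update(self, **kwargs):
        unused = {}
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                unused[key] = value
        return unused

cfg = ToyConfig()
print(cfg.update(max_new_tokens=1024, foo="bar"))  # {'foo': 'bar'}
print(cfg.max_new_tokens)                          # 1024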
293
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 'hf-internal-testing/tiny-random-t5' lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Any = tokenizer('This is me' , return_tensors='pt' ) lowerCAmelCase__ :Dict = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) lowerCAmelCase__ :Optional[Any] = model.generate(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Any = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) lowerCAmelCase__ :Union[str, Any] = model_reloaded.generate(**__UpperCAmelCase ) self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = 'hf-internal-testing/tiny-random-t5' lowerCAmelCase__ :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__UpperCAmelCase ): model.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = model.reverse_bettertransformer() model.save_pretrained(__UpperCAmelCase )
293
1
"""simple docstring""" import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" return EnvironmentCommand() class _lowerCAmelCase ( a ): """simple docstring""" @staticmethod def snake_case ( __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = parser.add_parser('env' ) download_parser.set_defaults(func=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = huggingface_hub.__version__ lowerCAmelCase__ :Any = 'not installed' lowerCAmelCase__ :Optional[int] = 'NA' if is_torch_available(): import torch lowerCAmelCase__ :List[str] = torch.__version__ lowerCAmelCase__ :Union[str, Any] = torch.cuda.is_available() lowerCAmelCase__ :Optional[int] = 'not installed' if is_transformers_available(): import transformers lowerCAmelCase__ :str = transformers.__version__ lowerCAmelCase__ :str = 'not installed' if is_accelerate_available(): import accelerate lowerCAmelCase__ :Optional[int] = accelerate.__version__ lowerCAmelCase__ :Dict = 'not installed' if is_xformers_available(): import xformers lowerCAmelCase__ :Any = xformers.__version__ lowerCAmelCase__ :Union[str, Any] = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': F"{pt_version} ({pt_cuda_available})", 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(__UpperCAmelCase ) ) return info @staticmethod def snake_case ( __UpperCAmelCase ): '''simple docstring''' return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
293
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] __A = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] __A = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): __A = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
1
"""simple docstring""" __A = { "km/h": 1.0, "m/s": 3.6, "mph": 1.60_9344, "knot": 1.852, } __A = { "km/h": 1.0, "m/s": 0.2_7777_7778, "mph": 0.6_2137_1192, "knot": 0.5_3995_6803, } def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float: """simple docstring""" if unit_to not in speed_chart or unit_from not in speed_chart_inverse: lowerCAmelCase__ :List[str] = ( F"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n" F"Valid values are: {', '.join(_SCREAMING_SNAKE_CASE )}" ) raise ValueError(_SCREAMING_SNAKE_CASE ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
293
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowerCAmelCase ( yaml.SafeLoader ): """simple docstring""" def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase__ :str = [tuple(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else key for key in keys] lowerCAmelCase__ :Optional[int] = Counter(__UpperCAmelCase ) lowerCAmelCase__ :int = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().construct_mapping(__UpperCAmelCase , deep=__UpperCAmelCase ) self._check_no_duplicates_on_constructed_node(__UpperCAmelCase ) return mapping def __A (_SCREAMING_SNAKE_CASE ) ->Tuple[Optional[str], str]: """simple docstring""" lowerCAmelCase__ :Optional[Any] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase__ :Optional[int] = full_content[1:].index('---' ) + 1 lowerCAmelCase__ :Union[str, Any] = '\n'.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :List[str] = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(__UpperCAmelCase ) else: return cls() def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path.exists(): with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ :Optional[Any] = readme_file.read() else: lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :Union[str, Any] = self._to_readme(__UpperCAmelCase ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as readme_file: readme_file.write(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase = None ): '''simple docstring''' if readme_content is not None: lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = _split_yaml_from_readme(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content else: lowerCAmelCase__ :str = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = yaml.load(__UpperCAmelCase , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase__ :int = { (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return yaml.safe_dump( { (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=__UpperCAmelCase , allow_unicode=__UpperCAmelCase , encoding='utf-8' , ).decode('utf-8' ) __A = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], 
"""automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser __A = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") __A = ap.parse_args() __A = Path(args.readme_filepath) __A = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class _lowerCAmelCase : """simple docstring""" __magic_name__ :Union[str, Any] = None def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase__ :Optional[Any] = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ :Optional[int] = os.path.join(__UpperCAmelCase , 'feat_extract.json' ) feat_extract_first.to_json_file(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = self.feature_extraction_class.from_json_file(__UpperCAmelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ :Tuple = feat_extract_first.save_pretrained(__UpperCAmelCase )[0] check_json_file_has_correct_format(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = self.feature_extraction_class.from_pretrained(__UpperCAmelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = self.feature_extraction_class() self.assertIsNotNone(__UpperCAmelCase )
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from collections import Counter from timeit import timeit def __A (_SCREAMING_SNAKE_CASE = "" , ) ->bool: """simple docstring""" return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2 def __A (_SCREAMING_SNAKE_CASE = "" ) ->bool: """simple docstring""" if len(_SCREAMING_SNAKE_CASE ) == 0: return True lowerCAmelCase__ :List[str] = input_str.replace(' ' , '' ).lower() # character_freq_dict: Stores the frequency of every character in the input string lowerCAmelCase__ :dict[str, int] = {} for character in lower_case_input_str: lowerCAmelCase__ :Union[str, Any] = character_freq_dict.get(_SCREAMING_SNAKE_CASE , 0 ) + 1 lowerCAmelCase__ :List[Any] = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def __A (_SCREAMING_SNAKE_CASE = "" ) ->None: """simple docstring""" print('\nFor string = ' , _SCREAMING_SNAKE_CASE , ':' ) print( '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_SCREAMING_SNAKE_CASE ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) print( '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_SCREAMING_SNAKE_CASE ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) if __name__ == "__main__": __A = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) __A = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __A = logging.getLogger(__name__) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase=-1 ): '''simple docstring''' lowerCAmelCase__ :Dict = label_idx def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Optional[Any] = mode.value lowerCAmelCase__ :List[str] = os.path.join(__UpperCAmelCase , F"{mode}.txt" ) lowerCAmelCase__ :List[str] = 1 lowerCAmelCase__ :Union[str, Any] = [] with open(__UpperCAmelCase , encoding='utf-8' ) as f: lowerCAmelCase__ :str = [] lowerCAmelCase__ :Dict = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 lowerCAmelCase__ :Tuple = [] lowerCAmelCase__ :List[str] = [] else: lowerCAmelCase__ :List[str] = line.split(' ' ) words.append(splits[0] ) if len(__UpperCAmelCase ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) return examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(__UpperCAmelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowerCAmelCase__ :Optional[Any] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(__UpperCAmelCase ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' 
, line.split()[0] ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: lowerCAmelCase__ :Any = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ :Union[str, Any] = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(label_idx=-2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: lowerCAmelCase__ :str = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ :Optional[Any] = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _lowerCAmelCase ( a ): """simple docstring""" def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = mode.value lowerCAmelCase__ :Union[str, Any] = os.path.join(__UpperCAmelCase , F"{mode}.txt" ) lowerCAmelCase__ :Any = 1 lowerCAmelCase__ :Optional[Any] = [] with open(__UpperCAmelCase , encoding='utf-8' ) as f: for sentence in parse_incr(__UpperCAmelCase ): lowerCAmelCase__ :Dict = [] lowerCAmelCase__ :Dict = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 return examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = 0 for sentence in parse_incr(__UpperCAmelCase ): lowerCAmelCase__ :Optional[int] = preds_list[example_id] lowerCAmelCase__ :Tuple = '' for token in sentence: out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(__UpperCAmelCase ) example_id += 1 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __A = pd.read_csv( """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/""" """position_salaries.csv""" ) __A = dataset.iloc[:, 1:2].values __A = dataset.iloc[:, 2].values __A , __A , __A , __A = train_test_split(X, y, test_size=0.2, random_state=0) __A = PolynomialFeatures(degree=4) __A = poly_reg.fit_transform(X) __A = LinearRegression() pol_reg.fit(X_poly, y) def __A () ->List[Any]: """simple docstring""" plt.scatter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color='red' ) plt.plot(_SCREAMING_SNAKE_CASE , pol_reg.predict(poly_reg.fit_transform(_SCREAMING_SNAKE_CASE ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
"""simple docstring""" from __future__ import annotations __A = tuple[int, int, int] __A = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase __A = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" # -------------------------- default selection -------------------------- # rotors -------------------------- __A = """EGZWVONAHDCLFQMSIPJBYUKXTR""" __A = """FOBHMDKEXQNRAULPGSJVTYICZW""" __A = """ZJXESIUQLHAVRMDOYGTNFWPBKC""" # reflector -------------------------- __A = { """A""": """N""", """N""": """A""", """B""": """O""", """O""": """B""", """C""": """P""", """P""": """C""", """D""": """Q""", """Q""": """D""", """E""": """R""", """R""": """E""", """F""": """S""", """S""": """F""", """G""": """T""", """T""": """G""", """H""": """U""", """U""": """H""", """I""": """V""", """V""": """I""", """J""": """W""", """W""": """J""", """K""": """X""", """X""": """K""", """L""": """Y""", """Y""": """L""", """M""": """Z""", """Z""": """M""", } # -------------------------- extra rotors -------------------------- __A = """RMDJXFUWGISLHVTCQNKYPBEZOA""" __A = """SGLCPQWZHKXAREONTFBVIYJUDM""" __A = """HVSICLTYKQUBXDWAJZOMFGPREN""" __A = """RZWQHFMVDBKICJLNTUXAGYPSOE""" __A = """LFKIJODBEGAMQPXVUHYSTCZRWN""" __A = """KOAEGVDHXPQZMLFTYWJNBRCIUS""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: """simple docstring""" if (unique_rotsel := len(set(_SCREAMING_SNAKE_CASE ) )) < 3: lowerCAmelCase__ :Union[str, Any] = F"Please use 3 unique rotors (not {unique_rotsel})" raise Exception(_SCREAMING_SNAKE_CASE ) # Checks if rotor positions are valid lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = rotpos if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Tuple = F"First rotor position is not within range of 1..26 ({rotorposa}" raise ValueError(_SCREAMING_SNAKE_CASE ) if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = F"Second rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_SCREAMING_SNAKE_CASE ) if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_SCREAMING_SNAKE_CASE ) # Validates string and returns dict lowerCAmelCase__ :int = _plugboard(_SCREAMING_SNAKE_CASE ) return rotpos, rotsel, pbdict def __A (_SCREAMING_SNAKE_CASE ) ->dict[str, str]: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :str = F"Plugboard setting isn't type string ({type(_SCREAMING_SNAKE_CASE )})" raise TypeError(_SCREAMING_SNAKE_CASE ) elif len(_SCREAMING_SNAKE_CASE ) % 2 != 0: lowerCAmelCase__ :str = F"Odd number of symbols ({len(_SCREAMING_SNAKE_CASE )})" raise Exception(_SCREAMING_SNAKE_CASE ) elif pbstring == "": return {} pbstring.replace(' ' , '' ) # Checks if all characters are unique lowerCAmelCase__ :Any = set() for i in pbstring: if i not in abc: lowerCAmelCase__ :Any = F"'{i}' not in list of symbols" raise Exception(_SCREAMING_SNAKE_CASE ) elif i in tmppbl: lowerCAmelCase__ :Dict = F"Duplicate symbol ({i})" raise Exception(_SCREAMING_SNAKE_CASE ) else: tmppbl.add(_SCREAMING_SNAKE_CASE ) del tmppbl # Created the dictionary lowerCAmelCase__ :List[Any] = {} for j in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 , 2 ): lowerCAmelCase__ :Optional[int] = pbstring[j + 1] lowerCAmelCase__ :Union[str, Any] = pbstring[j] return pb def __A (_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , _SCREAMING_SNAKE_CASE = "" , ) ->str: """simple docstring""" lowerCAmelCase__ :Tuple = text.upper() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = _validator( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , plugb.upper() ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = rotor_position lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 lowerCAmelCase__ :Dict = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: lowerCAmelCase__ :Dict = plugboard[symbol] # rotor ra -------------------------- lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :str = rotora[index % len(_SCREAMING_SNAKE_CASE )] # rotor rb -------------------------- lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :int = rotora[index % len(_SCREAMING_SNAKE_CASE )] # rotor rc -------------------------- lowerCAmelCase__ :str = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :Optional[Any] = rotora[index % len(_SCREAMING_SNAKE_CASE )] # reflector -------------------------- # this is the reason you don't need another machine to decipher lowerCAmelCase__ :str = reflector[symbol] # 2nd rotors lowerCAmelCase__ :Tuple = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] lowerCAmelCase__ :Optional[int] = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] lowerCAmelCase__ :Any = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] # 2nd plugboard if symbol in plugboard: lowerCAmelCase__ :Union[str, Any] = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :str = 0 rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :List[Any] = 0 rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(_SCREAMING_SNAKE_CASE ) return "".join(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = """This is my Python script that emulates the Enigma machine from WWII.""" __A = (1, 1, 1) __A = """pictures""" __A = (rotora, rotora, rotora) __A = enigma(message, rotor_pos, rotor_sel, pb) print("""Encrypted message:""", en) print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
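# The plugboard above pairs consecutive symbols into a symmetric substitution, so
# applying it twice is the identity; that involution is what lets one machine both
# encrypt and decrypt. A standalone sketch (helper name is illustrative, not from
# the module):
def _pairs_to_plugboard(pbstring: str) -> dict[str, str]:
    pb: dict[str, str] = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb


_pb = _pairs_to_plugboard("PICTURES".replace(" ", ""))
assert all(_pb[_pb[k]] == k for k in _pb)  # involution: applying twice is identity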
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A = logging.get_logger(__name__) __A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __A = { """vocab_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""", }, """merges_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""", }, """tokenizer_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""", }, } __A = { """gpt2""": 1024, """gpt2-medium""": 1024, """gpt2-large""": 1024, """gpt2-xl""": 1024, """distilgpt2""": 1024, } class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :str = VOCAB_FILES_NAMES __magic_name__ :str = PRETRAINED_VOCAB_FILES_MAP __magic_name__ :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ :int = ["""input_ids""", """attention_mask"""] __magic_name__ :Optional[Any] = GPTaTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) lowerCAmelCase__ :List[str] = kwargs.pop('add_bos_token' , __UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space: lowerCAmelCase__ :Optional[Any] = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) ) lowerCAmelCase__ :Optional[int] = add_prefix_space lowerCAmelCase__ :Optional[Any] = pre_tok_class(**__UpperCAmelCase ) lowerCAmelCase__ :int = add_prefix_space def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, 
( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :Dict = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] ) if len(__UpperCAmelCase ) > self.model_max_length: lowerCAmelCase__ :Any = input_ids[-self.model_max_length :] return input_ids
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def __A (_SCREAMING_SNAKE_CASE ) ->Tuple: """simple docstring""" lowerCAmelCase__ :List[str] = np.max(_outputs , axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Any = """sigmoid""" __magic_name__ :Optional[Any] = """softmax""" __magic_name__ :Optional[Any] = """none""" @add_end_docstrings( a , r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. """ , ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Union[str, Any] = False __magic_name__ :Dict = ClassificationFunction.NONE def __init__( self , **__UpperCAmelCase ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = tokenizer_kwargs lowerCAmelCase__ :List[Any] = {} if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None: lowerCAmelCase__ :List[Any] = self.model.config.return_all_scores if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or top_k is None: lowerCAmelCase__ :int = top_k lowerCAmelCase__ :Dict = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , __UpperCAmelCase , ) if return_all_scores: lowerCAmelCase__ :List[Any] = None else: lowerCAmelCase__ :Union[str, Any] = 1 if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowerCAmelCase__ :List[Any] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
lowerCAmelCase__ :Optional[Any] = 'top_k' not in kwargs if isinstance(args[0] , __UpperCAmelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = self.framework if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.tokenizer(**__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1 and isinstance(inputs[0] , __UpperCAmelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.model(**__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowerCAmelCase__ :str = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowerCAmelCase__ :int = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None: lowerCAmelCase__ :Optional[Any] = self.model.config.function_to_apply else: lowerCAmelCase__ :Dict = ClassificationFunction.NONE lowerCAmelCase__ :int = model_outputs['logits'][0] lowerCAmelCase__ :Union[str, Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowerCAmelCase__ :Dict = sigmoid(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowerCAmelCase__ :int = softmax(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.NONE: lowerCAmelCase__ :Tuple = outputs else: raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowerCAmelCase__ :Any = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(__UpperCAmelCase ) ] if not _legacy: dict_scores.sort(key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase ) if top_k is not None: lowerCAmelCase__ :List[str] = dict_scores[:top_k] return dict_scores
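# The postprocessing above reduces to these two maps over the raw logits; a
# self-contained illustration with dummy numbers:
import numpy as np

logits = np.array([2.0, 0.5, -1.0])
maxes = logits.max(axis=-1, keepdims=True)
shifted = np.exp(logits - maxes)  # subtract the max for numerical stability
print(shifted / shifted.sum(axis=-1, keepdims=True))  # softmax: scores sum to 1
print(1.0 / (1.0 + np.exp(-logits)))  # sigmoid: independent per-label scores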
"""simple docstring""" from __future__ import annotations def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[float, list[float]]: """simple docstring""" lowerCAmelCase__ :List[Any] = list(range(len(_SCREAMING_SNAKE_CASE ) ) ) lowerCAmelCase__ :str = [v / w for v, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] index.sort(key=lambda _SCREAMING_SNAKE_CASE : ratio[i] , reverse=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :float = 0 lowerCAmelCase__ :list[float] = [0] * len(_SCREAMING_SNAKE_CASE ) for i in index: if weight[i] <= capacity: lowerCAmelCase__ :Optional[int] = 1 max_value += value[i] capacity -= weight[i] else: lowerCAmelCase__ :Tuple = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float: """simple docstring""" if discount_rate < 0: raise ValueError('Discount rate cannot be negative' ) if not cash_flows: raise ValueError('Cash flows list cannot be empty' ) lowerCAmelCase__ :Union[str, Any] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_SCREAMING_SNAKE_CASE ) ) return round(_SCREAMING_SNAKE_CASE , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" def decorator(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Dict = getattr(_SCREAMING_SNAKE_CASE , 'handle_key' , [] ) handle += [key] setattr(_SCREAMING_SNAKE_CASE , 'handle_key' , _SCREAMING_SNAKE_CASE ) return func return decorator def __A (*_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" def decorator(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Any = getattr(_SCREAMING_SNAKE_CASE , 'handle_key' , [] ) handle += keys setattr(_SCREAMING_SNAKE_CASE , 'handle_key' , _SCREAMING_SNAKE_CASE ) return func return decorator class _lowerCAmelCase ( a ): """simple docstring""" def __new__( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().__new__(cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if not hasattr(__UpperCAmelCase , 'key_handler' ): setattr(__UpperCAmelCase , 'key_handler' , {} ) setattr(__UpperCAmelCase , 'handle_input' , KeyHandler.handle_input ) for value in attrs.values(): lowerCAmelCase__ :List[Any] = getattr(__UpperCAmelCase , 'handle_key' , [] ) for key in handled_keys: lowerCAmelCase__ :Any = value return new_cls @staticmethod def snake_case ( cls ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = get_character() if char != KEYMAP["undefined"]: lowerCAmelCase__ :Optional[int] = ord(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = cls.key_handler.get(__UpperCAmelCase ) if handler: lowerCAmelCase__ :List[Any] = char return handler(cls ) else: return None def __A (cls ) ->Tuple: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""OwlViTFeatureExtractor"""] __A = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = parent lowerCAmelCase__ :List[Any] = config_class lowerCAmelCase__ :List[Any] = has_text_modality lowerCAmelCase__ :str = kwargs lowerCAmelCase__ :Union[str, Any] = common_properties def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = self.config_class(**self.inputs_dict ) lowerCAmelCase__ :Union[str, Any] = ( ['hidden_size', 'num_attention_heads', 'num_hidden_layers'] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['vocab_size'] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) , msg=F"`{prop}` does not exist" ) # Test that config has the common properties as setter for idx, name in enumerate(__UpperCAmelCase ): try: setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.parent.assertEqual( getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase , msg=F"`{name} value {idx} expected, but was {getattr(__UpperCAmelCase , __UpperCAmelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(__UpperCAmelCase ): try: lowerCAmelCase__ :Any = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase , msg=F"`{name} value {idx} expected, but was {getattr(__UpperCAmelCase , __UpperCAmelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.config_class(**self.inputs_dict ) lowerCAmelCase__ :Tuple = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ :Optional[Any] = os.path.join(__UpperCAmelCase , 'config.json' ) config_first.to_json_file(__UpperCAmelCase ) lowerCAmelCase__ :int = self.config_class.from_json_file(__UpperCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = self.config_class.from_pretrained(__UpperCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = self.config_class(**self.inputs_dict ) lowerCAmelCase__ :List[Any] = 'test' with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ :Tuple = os.path.join(__UpperCAmelCase , 
__UpperCAmelCase ) config_first.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = self.config_class.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) lowerCAmelCase__ :Any = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def snake_case ( self ): '''simple docstring''' if self.config_class.is_composition: return lowerCAmelCase__ :List[Any] = self.config_class() self.parent.assertIsNotNone(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = copy.deepcopy(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = self.config_class(**__UpperCAmelCase ) lowerCAmelCase__ :int = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) ) elif getattr(__UpperCAmelCase , __UpperCAmelCase ) != value: wrong_values.append((key, getattr(__UpperCAmelCase , __UpperCAmelCase ), value) ) if len(__UpperCAmelCase ) > 0: lowerCAmelCase__ :List[str] = '\n'.join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] ) raise ValueError(F"The following keys were not properly set in the config:\n{errors}" ) def snake_case ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
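# Typical wiring for the harness above, sketched after upstream transformers usage
# (assumption: the mangled class corresponds to ConfigTester from
# tests/test_configuration_common.py; BertConfig stands in for the config under test):
import unittest

from transformers import BertConfig


class BertConfigTest(unittest.TestCase):
    def setUp(self):
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()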
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def snake_case ( *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' pass def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __A = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __magic_name__ :str = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = pipeline( 'document-question-answering' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :Dict = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) lowerCAmelCase__ :List[Any] = 'What is the placebo?' lowerCAmelCase__ :Dict = [ { 'image': load_image(__UpperCAmelCase ), 'question': question, }, { 'image': image, 'question': question, }, { 'image': image, 'question': question, 'word_boxes': word_boxes, }, ] return dqa_pipeline, examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = dqa_pipeline(__UpperCAmelCase , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )}, {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' ) lowerCAmelCase__ :Union[str, Any] = INVOICE_URL lowerCAmelCase__ :Tuple = 'How many cats are there?' lowerCAmelCase__ :List[str] = [ {'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9}, {'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0}, ] lowerCAmelCase__ :Any = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) lowerCAmelCase__ :Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably lowerCAmelCase__ :List[Any] = './tests/fixtures/tests_samples/COCO/000000039769.png' lowerCAmelCase__ :List[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) # We can optionnally pass directly the words and bounding boxes lowerCAmelCase__ :Dict = './tests/fixtures/tests_samples/COCO/000000039769.png' lowerCAmelCase__ :List[str] = [] lowerCAmelCase__ :int = [] lowerCAmelCase__ :List[str] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , ) lowerCAmelCase__ :str = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' lowerCAmelCase__ :Tuple = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :Union[str, Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :Dict = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , ) lowerCAmelCase__ :List[Any] = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' 
lowerCAmelCase__ :Optional[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :int = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , ) lowerCAmelCase__ :List[str] = INVOICE_URL lowerCAmelCase__ :Any = 'What is the invoice number?' lowerCAmelCase__ :List[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) lowerCAmelCase__ :Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] ] * 2 , ) lowerCAmelCase__ :Dict = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) # This model should also work if `image` is set to None lowerCAmelCase__ :Tuple = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) @slow @require_torch @require_pytesseract @require_vision def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , max_seq_len=5_0 , ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' 
lowerCAmelCase__ :List[str] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] ] * 2 , ) lowerCAmelCase__ :Optional[Any] = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) # This model should also work if `image` is set to None lowerCAmelCase__ :List[str] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) @slow @require_torch def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = pipeline( 'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :str = 'What is the invoice number?' lowerCAmelCase__ :Tuple = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'answer': 'us-001'}] ) @require_tf @unittest.skip('Document question answering not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass
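# Hedged end-to-end sketch of the pipeline exercised by these tests (standard
# transformers API; needs pytesseract plus a network download of the
# impira/layoutlm-document-qa checkpoint):
from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
preds = dqa(
    image=INVOICE_URL,  # the pinned invoice image defined at the top of this file
    question="What is the invoice number?",
    top_k=2,
)
print(preds)  # a list of {'score', 'answer', 'start', 'end'} dicts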
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = 1_0 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [1, 2, 3, 4] lowerCAmelCase__ :Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = '' lowerCAmelCase__ , lowerCAmelCase__ :Any = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) lowerCAmelCase__ , lowerCAmelCase__ :str = process_story(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[str] = ['It was the best of times.'] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = torch.tensor([1, 2, 3, 4] ) lowerCAmelCase__ :List[str] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 0 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowerCAmelCase__ :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 2_3 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowerCAmelCase__ :Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 1 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 1_0_1 lowerCAmelCase__ :str = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowerCAmelCase__ :Any = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 
1, 1]] ) lowerCAmelCase__ :List[Any] = compute_token_type_ids(__UpperCAmelCase , __UpperCAmelCase ) np.testing.assert_array_equal(__UpperCAmelCase , __UpperCAmelCase )
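# The pad/truncate contract the three tests above pin down, restated as a
# standalone one-liner (helper name is illustrative, not from utils_summarization):
def _truncate_or_pad(sequence, block_size, pad_token_id):
    return sequence[:block_size] + [pad_token_id] * max(0, block_size - len(sequence))


assert _truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert _truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))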
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :Tuple = StableDiffusionXLImgaImgPipeline __magic_name__ :List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} __magic_name__ :Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} __magic_name__ :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __magic_name__ :str = IMAGE_TO_IMAGE_IMAGE_PARAMS __magic_name__ :Any = IMAGE_TO_IMAGE_IMAGE_PARAMS def snake_case ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ :Optional[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , ) lowerCAmelCase__ :str = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , ) torch.manual_seed(0 ) lowerCAmelCase__ :str = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase__ :str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , ) lowerCAmelCase__ :int = CLIPTextModel(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase ) lowerCAmelCase__ :Any = CLIPTextModelWithProjection(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase ) lowerCAmelCase__ :str = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' lowerCAmelCase__ :Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith('mps' ): lowerCAmelCase__ 
:Optional[int] = torch.manual_seed(__UpperCAmelCase ) else: lowerCAmelCase__ :Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :int = self.get_dummy_components() lowerCAmelCase__ :List[str] = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :int = sd_pipe(**__UpperCAmelCase ).images lowerCAmelCase__ :int = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase__ :List[str] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def snake_case ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.get_dummy_components() lowerCAmelCase__ :str = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) # forward without prompt embeds lowerCAmelCase__ :int = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = 3 * ['this is a negative prompt'] lowerCAmelCase__ :Tuple = negative_prompt lowerCAmelCase__ :str = 3 * [inputs['prompt']] lowerCAmelCase__ :Optional[Any] = sd_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds lowerCAmelCase__ :Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = 3 * ['this is a negative prompt'] lowerCAmelCase__ :str = 3 * [inputs.pop('prompt' )] ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) :List[str] = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase ) lowerCAmelCase__ :str = sd_pipe( **__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , ) lowerCAmelCase__ :Optional[Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ): '''simple docstring''' lowerCAmelCase__ :Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Dict = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) ) 
lowerCAmelCase__ :Optional[int] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) lowerCAmelCase__ :int = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = self.get_inputs(__UpperCAmelCase ) lowerCAmelCase__ :int = pipe(**__UpperCAmelCase ).images lowerCAmelCase__ :Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase__ :List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
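# --- Hedged usage sketch (not part of the test class above) -----------------
# A minimal, self-contained version of the device-aware seeding pattern the
# get_dummy_inputs helpers rely on: MPS cannot host a device-bound
# torch.Generator, so the tests fall back to the globally seeded default
# generator there. The device string and seed below are illustrative
# assumptions.
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


noise = torch.randn((1, 4, 32, 32), generator=make_generator("cpu", seed=0))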
293
1
"""simple docstring""" import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :int = DownBlockaD # noqa F405 __magic_name__ :int = """down""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Union[str, Any] = ResnetDownsampleBlockaD # noqa F405 __magic_name__ :Any = """down""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :int = AttnDownBlockaD # noqa F405 __magic_name__ :Optional[Any] = """down""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Optional[Any] = CrossAttnDownBlockaD # noqa F405 __magic_name__ :Optional[int] = """down""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :List[str] = super().prepare_init_args_and_inputs_for_common() lowerCAmelCase__ :List[str] = 3_2 return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :int = SimpleCrossAttnDownBlockaD # noqa F405 __magic_name__ :int = """down""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = super().prepare_init_args_and_inputs_for_common() lowerCAmelCase__ :Any = 3_2 return init_dict, inputs_dict @unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :int = SkipDownBlockaD # noqa F405 __magic_name__ :int = """down""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :str = AttnSkipDownBlockaD # noqa F405 __magic_name__ :str = """down""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [0.55_39, 0.16_09, 
0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Dict = DownEncoderBlockaD # noqa F405 __magic_name__ :Union[str, Any] = """down""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_temb=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = { 'in_channels': 3_2, 'out_channels': 3_2, } lowerCAmelCase__ :Optional[int] = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :int = AttnDownEncoderBlockaD # noqa F405 __magic_name__ :Tuple = """down""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_temb=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = { 'in_channels': 3_2, 'out_channels': 3_2, } lowerCAmelCase__ :Union[str, Any] = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Union[str, Any] = UNetMidBlockaD # noqa F405 __magic_name__ :Union[str, Any] = """mid""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = { 'in_channels': 3_2, 'temb_channels': 1_2_8, } lowerCAmelCase__ :List[Any] = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :int = UNetMidBlockaDCrossAttn # noqa F405 __magic_name__ :str = """mid""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = super().prepare_init_args_and_inputs_for_common() lowerCAmelCase__ :str = 3_2 return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405 __magic_name__ :List[Any] = """mid""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = super().prepare_init_args_and_inputs_for_common() lowerCAmelCase__ :int = 3_2 return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[str] = UpBlockaD # noqa F405 __magic_name__ :Optional[int] = """up""" @property def 
snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :int = ResnetUpsampleBlockaD # noqa F405 __magic_name__ :str = """up""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :int = CrossAttnUpBlockaD # noqa F405 __magic_name__ :Optional[Any] = """up""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = super().prepare_init_args_and_inputs_for_common() lowerCAmelCase__ :List[str] = 3_2 return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Dict = SimpleCrossAttnUpBlockaD # noqa F405 __magic_name__ :str = """up""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase , include_encoder_hidden_states=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = super().prepare_init_args_and_inputs_for_common() lowerCAmelCase__ :Optional[int] = 3_2 return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :int = AttnUpBlockaD # noqa F405 __magic_name__ :List[str] = """up""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) @unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Tuple = SkipUpBlockaD # noqa F405 __magic_name__ :List[str] = """up""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Optional[int] = AttnSkipUpBlockaD # noqa F405 __magic_name__ :Tuple = 
"""up""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Optional[int] = UpDecoderBlockaD # noqa F405 __magic_name__ :Union[str, Any] = """up""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_temb=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = {'in_channels': 3_2, 'out_channels': 3_2} lowerCAmelCase__ :Optional[Any] = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37] super().test_output(__UpperCAmelCase ) class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[Any] = AttnUpDecoderBlockaD # noqa F405 __magic_name__ :Any = """up""" @property def snake_case ( self ): '''simple docstring''' return super().get_dummy_input(include_temb=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = {'in_channels': 3_2, 'out_channels': 3_2} lowerCAmelCase__ :Tuple = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68] super().test_output(__UpperCAmelCase )
293
"""simple docstring""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" lowerCAmelCase__ :str = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(F"Building PyTorch model from configuration: {config}" ) lowerCAmelCase__ :int = BertForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
293
1
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __A = { """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""", """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""", """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""", """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""", """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""", """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""", """mask_downscaling.0""": """mask_embed.conv1""", """mask_downscaling.1""": """mask_embed.layer_norm1""", """mask_downscaling.3""": """mask_embed.conv2""", """mask_downscaling.4""": """mask_embed.layer_norm2""", """mask_downscaling.6""": """mask_embed.conv3""", """point_embeddings""": """point_embed""", """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""", """image_encoder""": """vision_encoder""", """neck.0""": """neck.conv1""", """neck.1""": """neck.layer_norm1""", """neck.2""": """neck.conv2""", """neck.3""": """neck.layer_norm2""", """patch_embed.proj""": """patch_embed.projection""", """.norm""": """.layer_norm""", """blocks""": """layers""", } def __A (_SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" lowerCAmelCase__ :List[Any] = {} state_dict.pop('pixel_mean' , _SCREAMING_SNAKE_CASE ) state_dict.pop('pixel_std' , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :int = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowerCAmelCase__ :Union[str, Any] = key.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if re.match(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Dict = int(re.match(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).group(2 ) ) if layer_nb == 0: lowerCAmelCase__ :Optional[Any] = key.replace('layers.0' , 'proj_in' ) elif layer_nb == 1: lowerCAmelCase__ :List[Any] = key.replace('layers.1' , 'layers.0' ) elif layer_nb == 2: lowerCAmelCase__ :str = key.replace('layers.2' , 'proj_out' ) lowerCAmelCase__ :List[Any] = value lowerCAmelCase__ :int = model_state_dict[ 'prompt_encoder.shared_embedding.positional_embedding' ] return model_state_dict def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="ybelkada/segment-anything" ) ->Tuple: """simple docstring""" lowerCAmelCase__ :Optional[int] = hf_hub_download(_SCREAMING_SNAKE_CASE , F"checkpoints/{model_name}.pth" ) if "sam_vit_b" in model_name: lowerCAmelCase__ :List[str] = SamConfig() elif "sam_vit_l" in model_name: lowerCAmelCase__ :Any = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) lowerCAmelCase__ :Optional[Any] = SamConfig( vision_config=_SCREAMING_SNAKE_CASE , ) elif "sam_vit_h" in model_name: lowerCAmelCase__ :str = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) lowerCAmelCase__ :List[str] = SamConfig( vision_config=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ :Optional[int] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) lowerCAmelCase__ :Dict = replace_keys(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = 
SamImageProcessor() lowerCAmelCase__ :str = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[Any] = SamModel(_SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = hf_model.to('cuda' ) lowerCAmelCase__ :Tuple = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png' lowerCAmelCase__ :str = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' ) lowerCAmelCase__ :str = [[[400, 650]]] lowerCAmelCase__ :Optional[int] = [[1]] lowerCAmelCase__ :List[Any] = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): lowerCAmelCase__ :Any = hf_model(**_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Union[str, Any] = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8 lowerCAmelCase__ :List[Any] = processor( images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): lowerCAmelCase__ :int = hf_model(**_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Union[str, Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4 lowerCAmelCase__ :Dict = ((75, 275, 1725, 850),) lowerCAmelCase__ :Any = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , input_boxes=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): lowerCAmelCase__ :Any = hf_model(**_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[int] = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4 # Test with 2 points and 1 image. lowerCAmelCase__ :List[str] = [[[400, 650], [800, 650]]] lowerCAmelCase__ :Optional[int] = [[1, 1]] lowerCAmelCase__ :List[str] = processor( images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): lowerCAmelCase__ :List[str] = hf_model(**_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :int = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2 if __name__ == "__main__": __A = argparse.ArgumentParser() __A = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""] parser.add_argument( """--model_name""", default="""sam_vit_h_4b8939""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) parser.add_argument( """--model_hub_id""", default="""ybelkada/segment-anything""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) __A = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
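# --- Standalone sketch of the key-renaming logic in replace_keys ------------
# Plain substring mapping plus the regex special-case for the hypernetwork
# MLPs (layer 0 -> proj_in, 1 -> layers.0, 2 -> proj_out). The mapping here
# is abbreviated; the full table is defined above.
import re

_MAPPING = {"image_encoder": "vision_encoder", "blocks": "layers"}
_MLP_RE = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"


def rename_key(key: str) -> str:
    for old, new in _MAPPING.items():
        key = key.replace(old, new)
    match = re.match(_MLP_RE, key)
    if match:
        layer_nb = int(match.group(2))
        new_name = {0: "proj_in", 1: "layers.0", 2: "proj_out"}.get(layer_nb)
        if new_name is not None:
            key = key.replace(f"layers.{layer_nb}", new_name)
    return key


print(rename_key("image_encoder.blocks.0.attn.qkv.weight"))
# -> vision_encoder.layers.0.attn.qkv.weight
print(rename_key("mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"))
# -> mask_decoder.output_hypernetworks_mlps.0.proj_out.weight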
293
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[str] = XGLMTokenizer __magic_name__ :Any = XGLMTokenizerFast __magic_name__ :Dict = True __magic_name__ :Union[str, Any] = True def snake_case ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ :int = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = '<pad>' lowerCAmelCase__ :int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase__ :int = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase__ :Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) lowerCAmelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def snake_case ( self ): '''simple docstring''' return XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) def snake_case ( self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__UpperCAmelCase , f.name ) lowerCAmelCase__ :Dict = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = pickle.dumps(__UpperCAmelCase ) pickle.loads(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ :Optional[Any] = self.get_tokenizer() lowerCAmelCase__ :List[str] = self.get_rust_tokenizer() lowerCAmelCase__ :Optional[Any] = 'I was born in 92000, and this is falsé.' lowerCAmelCase__ :Dict = tokenizer.tokenize(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = rust_tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = self.get_rust_tokenizer() lowerCAmelCase__ :Dict = tokenizer.encode(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = rust_tokenizer.encode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = 'Hello World!' lowerCAmelCase__ :Tuple = [2, 3_1_2_2_7, 4_4_4_7, 3_5] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowerCAmelCase__ :List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5] # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = { 'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='facebook/xglm-564M' , padding=__UpperCAmelCase , )
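# --- Hedged parity check outside the test harness ---------------------------
# Downloads the real facebook/xglm-564M vocabulary, so it needs network
# access; it mirrors the slow/fast tokenizer comparison performed above with
# the local SentencePiece fixture.
from transformers import XGLMTokenizer, XGLMTokenizerFast

slow_tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
fast_tok = XGLMTokenizerFast.from_pretrained("facebook/xglm-564M")
sample = "I was born in 92000, and this is falsé."
assert slow_tok.tokenize(sample) == fast_tok.tokenize(sample)
assert slow_tok.encode(sample) == fast_tok.encode(sample)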
293
1
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple: """simple docstring""" if height >= 1: move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_disk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: """simple docstring""" print('moving disk from' , _SCREAMING_SNAKE_CASE , 'to' , _SCREAMING_SNAKE_CASE ) def __A () ->Any: """simple docstring""" lowerCAmelCase__ :List[str] = int(input('Height of hanoi: ' ).strip() ) move_tower(_SCREAMING_SNAKE_CASE , 'A' , 'B' , 'C' ) if __name__ == "__main__": main()
293
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __A = Lock() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]: """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowerCAmelCase__ :Any = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowerCAmelCase__ :Tuple = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowerCAmelCase__ :Optional[int] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowerCAmelCase__ :Optional[int] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # after all swaps are performed, send the values back to main result_pipe[1].send(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :str = [] lowerCAmelCase__ :Optional[Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowerCAmelCase__ :List[str] = Pipe() lowerCAmelCase__ :List[Any] = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowerCAmelCase__ :Dict = temp_rs lowerCAmelCase__ :Optional[Any] = temp_rr for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ): lowerCAmelCase__ :Union[str, Any] = Pipe() lowerCAmelCase__ :List[str] = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowerCAmelCase__ :Union[str, Any] = temp_rs lowerCAmelCase__ :Any = temp_rr process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=( len(_SCREAMING_SNAKE_CASE ) - 1, arr[len(_SCREAMING_SNAKE_CASE ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ): lowerCAmelCase__ :str = result_pipe[p][0].recv() process_array_[p].join() return arr def __A () ->List[Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = odd_even_transposition(_SCREAMING_SNAKE_CASE ) print('Sorted List\n' ) print(*_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
293
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def __A (_SCREAMING_SNAKE_CASE ) ->list[list[float]]: """simple docstring""" lowerCAmelCase__ :Any = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_SCREAMING_SNAKE_CASE ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase__ :int = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase__ :Any = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase__ , lowerCAmelCase__ :int = matrix[1][1], matrix[0][0] lowerCAmelCase__ , lowerCAmelCase__ :Any = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_SCREAMING_SNAKE_CASE ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_SCREAMING_SNAKE_CASE ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase__ :int = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creating cofactor matrix lowerCAmelCase__ :Dict = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase__ :Optional[int] = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase__ :Optional[Any] = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowerCAmelCase__ :List[str] = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase__ :Dict = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase__ :Union[str, Any] = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase__ :Tuple = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase__ :Optional[int] = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase__ :Optional[int] = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase__ :Optional[Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase__ :Union[str, Any] = array(_SCREAMING_SNAKE_CASE ) for i in range(3 ): for j in range(3 ): lowerCAmelCase__ :str = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase__ :Union[str, Any] = array(_SCREAMING_SNAKE_CASE ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_SCREAMING_SNAKE_CASE ) # Calculate the inverse of the matrix return [[float(d(_SCREAMING_SNAKE_CASE ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
293
"""simple docstring""" from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(a ) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , **__UpperCAmelCase ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) requires_backends(self , 'vision' ) requires_backends(self , 'torch' ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) self.check_model_type(__UpperCAmelCase ) def snake_case ( self , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = {} lowerCAmelCase__ :Tuple = {} lowerCAmelCase__ :Any = {} # preprocess args if "points_per_batch" in kwargs: lowerCAmelCase__ :Dict = kwargs['points_per_batch'] if "points_per_crop" in kwargs: lowerCAmelCase__ :Union[str, Any] = kwargs['points_per_crop'] if "crops_n_layers" in kwargs: lowerCAmelCase__ :Any = kwargs['crops_n_layers'] if "crop_overlap_ratio" in kwargs: lowerCAmelCase__ :Any = kwargs['crop_overlap_ratio'] if "crop_n_points_downscale_factor" in kwargs: lowerCAmelCase__ :Dict = kwargs['crop_n_points_downscale_factor'] # postprocess args if "pred_iou_thresh" in kwargs: lowerCAmelCase__ :Tuple = kwargs['pred_iou_thresh'] if "stability_score_offset" in kwargs: lowerCAmelCase__ :Optional[int] = kwargs['stability_score_offset'] if "mask_threshold" in kwargs: lowerCAmelCase__ :List[Any] = kwargs['mask_threshold'] if "stability_score_thresh" in kwargs: lowerCAmelCase__ :Optional[Any] = kwargs['stability_score_thresh'] if "crops_nms_thresh" in kwargs: lowerCAmelCase__ :int = kwargs['crops_nms_thresh'] if "output_rle_mask" in kwargs: lowerCAmelCase__ :Union[str, Any] = kwargs['output_rle_mask'] if "output_bboxes_mask" in kwargs: lowerCAmelCase__ :Optional[Any] = kwargs['output_bboxes_mask'] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(__UpperCAmelCase , *__UpperCAmelCase , num_workers=__UpperCAmelCase , batch_size=__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=6_4 , __UpperCAmelCase = 0 , __UpperCAmelCase = 5_1_2 / 1_5_0_0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 1 , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = load_image(__UpperCAmelCase ) lowerCAmelCase__ :int = self.image_processor.size['longest_edge'] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = self.image_processor.generate_crop_boxes( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = self.image_processor(images=__UpperCAmelCase , return_tensors='pt' ) with self.device_placement(): if self.framework == "pt": lowerCAmelCase__ :Optional[int] = self.get_inference_context() with inference_context(): lowerCAmelCase__ :Any = self._ensure_tensor_on_device(__UpperCAmelCase , device=self.device ) lowerCAmelCase__ :Tuple = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) ) lowerCAmelCase__ :Optional[int] = image_embeddings lowerCAmelCase__ 
:List[Any] = grid_points.shape[1] lowerCAmelCase__ :Union[str, Any] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( 'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. ' 'To return all points at once, set points_per_batch to None' ) for i in range(0 , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Optional[Any] = grid_points[:, i : i + points_per_batch, :, :] lowerCAmelCase__ :List[str] = input_labels[:, i : i + points_per_batch] lowerCAmelCase__ :List[Any] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0.88 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , ): '''simple docstring''' lowerCAmelCase__ :Any = model_inputs.pop('input_boxes' ) lowerCAmelCase__ :Optional[int] = model_inputs.pop('is_last' ) lowerCAmelCase__ :Dict = model_inputs.pop('original_sizes' ).tolist() lowerCAmelCase__ :Dict = model_inputs.pop('reshaped_input_sizes' ).tolist() lowerCAmelCase__ :Optional[int] = self.model(**__UpperCAmelCase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCAmelCase__ :int = model_outputs['pred_masks'] lowerCAmelCase__ :Optional[Any] = self.image_processor.post_process_masks( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , binarize=__UpperCAmelCase ) lowerCAmelCase__ :Any = model_outputs['iou_scores'] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.7 , ): '''simple docstring''' lowerCAmelCase__ :Dict = [] lowerCAmelCase__ :Optional[Any] = [] lowerCAmelCase__ :int = [] for model_output in model_outputs: all_scores.append(model_output.pop('iou_scores' ) ) all_masks.extend(model_output.pop('masks' ) ) all_boxes.append(model_output.pop('boxes' ) ) lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase ) lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self.image_processor.post_process_for_mask_generation( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Tuple = defaultdict(__UpperCAmelCase ) for output in model_outputs: for k, v in output.items(): extra[k].append(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = {} if output_rle_mask: lowerCAmelCase__ :str = rle_mask if output_bboxes_mask: lowerCAmelCase__ :Optional[int] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
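# --- Hypothetical end-to-end call through the pipeline above -----------------
# Downloads a real SAM checkpoint, so it needs network access and benefits
# from a GPU (device=0); the image URL and points_per_batch are illustrative.
from transformers import pipeline

mask_generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=0)
outputs = mask_generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=64,
)
print(len(outputs["masks"]), "masks; top IoU score:", float(outputs["scores"][0]))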
293
1
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __A = """\ """ __A = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ __A = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def snake_case ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1_6 , __UpperCAmelCase = True , __UpperCAmelCase=None ): '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": lowerCAmelCase__ :Tuple = 'cuda' else: lowerCAmelCase__ :List[Any] = 'cuda' if torch.cuda.is_available() else 'cpu' lowerCAmelCase__ :List[str] = AutoModelForCausalLM.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = model.to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = AutoTokenizer.from_pretrained(__UpperCAmelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: lowerCAmelCase__ :Optional[Any] = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__UpperCAmelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" lowerCAmelCase__ :List[str] = model.config.max_length - 1 else: lowerCAmelCase__ :Dict = model.config.max_length lowerCAmelCase__ :List[Any] = tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors='pt' , return_attention_mask=__UpperCAmelCase , ).to(__UpperCAmelCase ) lowerCAmelCase__ :Any = encodings['input_ids'] lowerCAmelCase__ :Optional[int] = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." 
lowerCAmelCase__ :Tuple = [] lowerCAmelCase__ :List[Any] = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ) ): lowerCAmelCase__ :Optional[int] = min(start_index + batch_size , len(__UpperCAmelCase ) ) lowerCAmelCase__ :Union[str, Any] = encoded_texts[start_index:end_index] lowerCAmelCase__ :Tuple = attn_masks[start_index:end_index] if add_start_token: lowerCAmelCase__ :int = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) lowerCAmelCase__ :Dict = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCAmelCase ), attn_mask] , dim=1 ) lowerCAmelCase__ :Optional[int] = encoded_batch with torch.no_grad(): lowerCAmelCase__ :Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).logits lowerCAmelCase__ :str = out_logits[..., :-1, :].contiguous() lowerCAmelCase__ :Dict = labels[..., 1:].contiguous() lowerCAmelCase__ :Tuple = attn_mask[..., 1:].contiguous() lowerCAmelCase__ :Dict = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCAmelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCAmelCase )}
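# --- Core computation, stripped of batching/padding/device handling ----------
# Perplexity is exp of the mean next-token negative log-likelihood. Toy
# check: uniform logits over V classes must give perplexity exactly V.
import torch
from torch.nn import CrossEntropyLoss


def perplexity_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> float:
    shift_logits = logits[..., :-1, :].contiguous()
    shift_labels = labels[..., 1:].contiguous()
    loss = CrossEntropyLoss(reduction="mean")(
        shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
    )
    return float(torch.exp(loss))


vocab = 11
logits = torch.zeros(1, 5, vocab)
labels = torch.randint(0, vocab, (1, 5))
assert abs(perplexity_from_logits(logits, labels) - vocab) < 1e-3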
293
"""simple docstring""" from __future__ import annotations __A = 1.6_021e-19 # units = C def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->tuple[str, float]: """simple docstring""" if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif conductivity < 0: raise ValueError('Conductivity cannot be negative' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative' ) elif mobility < 0: raise ValueError('mobility cannot be negative' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
293
1
"""simple docstring""" import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A = logging.get_logger(__name__) __A = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""} __A = { """vocab_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""", }, """emoji_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""", }, } __A = { """abeja/gpt-neox-japanese-2.7b""": 2048, } def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: lowerCAmelCase__ :Optional[Any] = json.loads(f.read() ) lowerCAmelCase__ :Optional[Any] = collections.OrderedDict() lowerCAmelCase__ :List[Any] = collections.OrderedDict() lowerCAmelCase__ :List[str] = collections.OrderedDict() with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: lowerCAmelCase__ :str = f.readlines() lowerCAmelCase__ :Optional[int] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token] for idx, b in enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Tuple = b lowerCAmelCase__ :Optional[int] = idx for wd in b: lowerCAmelCase__ :Any = idx return vocab, raw_vocab, ids_to_tokens, emoji class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Optional[Any] = VOCAB_FILES_NAMES __magic_name__ :Any = PRETRAINED_VOCAB_FILES_MAP __magic_name__ :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ :List[str] = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|startoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , do_clean_text=__UpperCAmelCase , **__UpperCAmelCase , ) if not os.path.isfile(__UpperCAmelCase ): raise ValueError( F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) if not os.path.isfile(__UpperCAmelCase ): raise ValueError( F"Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google" ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) lowerCAmelCase__ :Tuple = do_clean_text lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = load_vocab_and_emoji(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def snake_case ( self ): '''simple docstring''' return len(self.raw_vocab ) def snake_case ( self ): '''simple docstring''' return dict(self.raw_vocab , **self.added_tokens_encoder ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.subword_tokenizer.tokenize(__UpperCAmelCase , clean=self.do_clean_text ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.vocab.get(__UpperCAmelCase , self.vocab.get(self.unk_token ) ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = ''.join(__UpperCAmelCase ).strip() return out_string def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] ) if len(__UpperCAmelCase ) > self.model_max_length: lowerCAmelCase__ :Dict = input_ids[-self.model_max_length :] return input_ids def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :List[str] = 0 if os.path.isdir(__UpperCAmelCase ): lowerCAmelCase__ :Tuple = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase__ :int = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] ) else: lowerCAmelCase__ :List[Any] = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase__ :Optional[Any] = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." ' Please check that the vocabulary is not corrupted!' 
) lowerCAmelCase__ :int = token_index writer.write(','.join(__UpperCAmelCase ) + '\n' ) index += 1 with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as writer: json.dump(self.emoji , __UpperCAmelCase ) return vocab_file, emoji_file class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = vocab # same as swe lowerCAmelCase__ :str = ids_to_tokens # same as bpe lowerCAmelCase__ :Tuple = emoji lowerCAmelCase__ :Optional[int] = np.max([len(__UpperCAmelCase ) for w in self.vocab.keys()] ) lowerCAmelCase__ :Union[str, Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' ) lowerCAmelCase__ :List[Any] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' ) lowerCAmelCase__ :Optional[Any] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' ) lowerCAmelCase__ :int = re.compile( R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCAmelCase__ :List[Any] = re.compile( R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCAmelCase__ :Optional[Any] = re.compile( R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' ) lowerCAmelCase__ :Dict = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' lowerCAmelCase__ :Optional[int] = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' lowerCAmelCase__ :Optional[Any] = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} ) def __len__( self ): '''simple docstring''' return len(self.ids_to_tokens ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.content_repattera.sub('<URL>' , __UpperCAmelCase ) lowerCAmelCase__ :Dict = self.content_repattera.sub('<EMAIL>' , __UpperCAmelCase ) lowerCAmelCase__ :Dict = self.content_repattera.sub('<TEL>' , __UpperCAmelCase ) lowerCAmelCase__ :Dict = self.content_repattera.sub('<DATE>' , __UpperCAmelCase ) lowerCAmelCase__ :List[Any] = self.content_repattera.sub('<DATE>' , __UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = self.content_repattera.sub('<PRICE>' , __UpperCAmelCase ) lowerCAmelCase__ :List[str] = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: lowerCAmelCase__ :Optional[Any] = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' ) return content def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' lowerCAmelCase__ :Tuple = text.replace(' ' , '<SP>' ) lowerCAmelCase__ :Union[str, Any] = text.replace(' ' , '<SP>' ) lowerCAmelCase__ :Any = text.replace('\r\n' , '<BR>' ) lowerCAmelCase__ :Tuple = text.replace('\n' , '<BR>' ) lowerCAmelCase__ :int = text.replace('\r' , '<BR>' ) lowerCAmelCase__ :int = text.replace('\t' , '<TAB>' ) lowerCAmelCase__ :Optional[Any] = text.replace('—' , 'ー' ) lowerCAmelCase__ :List[Any] = text.replace('−' , 'ー' ) for k, v in self.emoji["emoji"].items(): if k in text: lowerCAmelCase__ :List[str] = text.replace(__UpperCAmelCase , __UpperCAmelCase ) if clean: lowerCAmelCase__ :Optional[Any] = self.clean_text(__UpperCAmelCase ) 
def check_simbol(__UpperCAmelCase ): lowerCAmelCase__ :Dict = x.encode() if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 2: lowerCAmelCase__ :Optional[int] = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xc2_a1 and c <= 0xc2_bf) or (c >= 0xc7_80 and c <= 0xc7_83) or (c >= 0xca_b9 and c <= 0xcb_bf) or (c >= 0xcc_80 and c <= 0xcd_a2) ): return True return False def checkuae(__UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = x.encode() if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 3: lowerCAmelCase__ :Any = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0xe2_80_80 and c <= 0xe2_b0_7f: return True return False lowerCAmelCase__ :Optional[Any] = 0 lowerCAmelCase__ :int = [] while pos < len(__UpperCAmelCase ): lowerCAmelCase__ :Optional[Any] = min(len(__UpperCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3 lowerCAmelCase__ :Tuple = [] # (token_id, token, pos) for e in range(__UpperCAmelCase , __UpperCAmelCase , -1 ): lowerCAmelCase__ :Optional[Any] = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__UpperCAmelCase ) > 2: lowerCAmelCase__ :Tuple = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(__UpperCAmelCase ) > 0: # the smallest token_id is adopted lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[0] )[0] result.append(__UpperCAmelCase ) lowerCAmelCase__ :str = e else: lowerCAmelCase__ :List[Any] = pos + 1 lowerCAmelCase__ :Dict = text[pos:end] if check_simbol(__UpperCAmelCase ): result.append('<KIGOU>' ) elif checkuae(__UpperCAmelCase ): result.append('<U2000U2BFF>' ) else: for i in wd.encode('utf-8' ): result.append('<|byte%d|>' % i ) lowerCAmelCase__ :List[str] = end return result def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="\n" ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [] lowerCAmelCase__ :Tuple = [] lowerCAmelCase__ :int = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(__UpperCAmelCase ) > 0: words.append(bytearray(__UpperCAmelCase ).decode('utf-8' , errors='replace' ) ) lowerCAmelCase__ :str = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['emoji_inv'][word] ) elif word == "<SP>": words.append(' ' ) elif word == "<BR>": words.append(__UpperCAmelCase ) elif word == "<TAB>": words.append('\t' ) elif word == "<BLOCK>": words.append('▀' ) elif word == "<KIGOU>": words.append('ǀ' ) elif word == "<U2000U2BFF>": words.append('‖' ) else: words.append(__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: words.append(bytearray(__UpperCAmelCase ).decode('utf-8' , errors='replace' ) ) lowerCAmelCase__ :Any = ''.join(__UpperCAmelCase ) return text
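A minimal usage sketch for the tokenizer defined above, assuming the `abeja/gpt-neox-japanese-2.7b` checkpoint from its vocab map is reachable:

from transformers import GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer("人工知能")["input_ids"]  # example Japanese input
print(ids)
print(tokenizer.decode(ids))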
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=1_8 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , ): '''simple docstring''' lowerCAmelCase__ :Dict = size if size is not None else {'height': 1_8, 'width': 1_8} lowerCAmelCase__ :Tuple = parent lowerCAmelCase__ :List[Any] = batch_size lowerCAmelCase__ :List[Any] = num_channels lowerCAmelCase__ :Any = image_size lowerCAmelCase__ :int = min_resolution lowerCAmelCase__ :int = max_resolution lowerCAmelCase__ :Dict = do_resize lowerCAmelCase__ :str = size lowerCAmelCase__ :Any = apply_ocr def snake_case ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :str = LayoutLMvaImageProcessor if is_pytesseract_available() else None def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = LayoutLMvaImageProcessingTester(self ) @property def snake_case ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'apply_ocr' ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} ) lowerCAmelCase__ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} ) def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __UpperCAmelCase ) self.assertIsInstance(encoding.boxes , __UpperCAmelCase ) # Test batched lowerCAmelCase__ :Any = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCAmelCase__ :Optional[Any] = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCAmelCase__ :Any = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCAmelCase__ :Tuple = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) lowerCAmelCase__ :int = Image.open(ds[0]['file'] ).convert('RGB' ) lowerCAmelCase__ :Optional[int] = image_processing(__UpperCAmelCase , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCAmelCase__ :Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 
'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 lowerCAmelCase__ :List[str] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 
6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __UpperCAmelCase ) self.assertListEqual(encoding.boxes , __UpperCAmelCase ) # with apply_OCR = False lowerCAmelCase__ :int = LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = image_processing(__UpperCAmelCase , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
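A short sketch of the image processor exercised by these tests; with `apply_ocr=False` no Tesseract install is needed, and the blank image is a stand-in for a real document scan:

from PIL import Image
from transformers import LayoutLMv3ImageProcessor  # spelled LayoutLMvaImageProcessor in this file

processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # skip OCR: no words/boxes returned
image = Image.new("RGB", (640, 480), color="white")
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])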
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Dict = """llama""" __magic_name__ :List[str] = ["""past_key_values"""] def __init__( self , __UpperCAmelCase=3_2_0_0_0 , __UpperCAmelCase=4_0_9_6 , __UpperCAmelCase=1_1_0_0_8 , __UpperCAmelCase=3_2 , __UpperCAmelCase=3_2 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=2_0_4_8 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :str = vocab_size lowerCAmelCase__ :List[str] = max_position_embeddings lowerCAmelCase__ :int = hidden_size lowerCAmelCase__ :Tuple = intermediate_size lowerCAmelCase__ :Dict = num_hidden_layers lowerCAmelCase__ :int = num_attention_heads # for backward compatibility if num_key_value_heads is None: lowerCAmelCase__ :Any = num_attention_heads lowerCAmelCase__ :Any = num_key_value_heads lowerCAmelCase__ :List[Any] = hidden_act lowerCAmelCase__ :int = initializer_range lowerCAmelCase__ :str = rms_norm_eps lowerCAmelCase__ :Optional[int] = pretraining_tp lowerCAmelCase__ :str = use_cache lowerCAmelCase__ :str = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def snake_case ( self ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' F"got {self.rope_scaling}" ) lowerCAmelCase__ :str = self.rope_scaling.get('type' , __UpperCAmelCase ) lowerCAmelCase__ :Dict = self.rope_scaling.get('factor' , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""ReformerTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""ReformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ReformerAttention""", """ReformerForMaskedLM""", """ReformerForQuestionAnswering""", """ReformerForSequenceClassification""", """ReformerLayer""", """ReformerModel""", """ReformerModelWithLMHead""", """ReformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" from ....utils import logging __A = logging.get_logger(__name__) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=2_0_4_8 ): '''simple docstring''' lowerCAmelCase__ :List[Any] = config.__dict__ lowerCAmelCase__ :Any = modal_hidden_size if num_labels: lowerCAmelCase__ :Union[str, Any] = num_labels
"""simple docstring""" import math def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be an integer" raise TypeError(_SCREAMING_SNAKE_CASE ) if number < 1: lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be > 0" raise ValueError(_SCREAMING_SNAKE_CASE ) elif number == 1: return 3 elif number == 2: return 5 else: lowerCAmelCase__ :Union[str, Any] = int(math.log(number // 3 , 2 ) ) + 2 lowerCAmelCase__ :Optional[Any] = [3, 5] lowerCAmelCase__ :Optional[Any] = 2 lowerCAmelCase__ :List[str] = 3 for block in range(1 , _SCREAMING_SNAKE_CASE ): for _ in range(_SCREAMING_SNAKE_CASE ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): __A = 0 try: __A = proth(number) except ValueError: print(F'''ValueError: there is no {number}th Proth number''') continue print(F'''The {number}th Proth number: {value}''')
"""simple docstring""" from __future__ import annotations from statistics import mean def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]: """simple docstring""" lowerCAmelCase__ :Optional[int] = [0] * no_of_processes lowerCAmelCase__ :str = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Any = burst_time[i] lowerCAmelCase__ :list[int] = [] lowerCAmelCase__ :Optional[Any] = 0 lowerCAmelCase__ :Optional[int] = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: lowerCAmelCase__ :List[Any] = [] lowerCAmelCase__ :int = -1 for i in range(_SCREAMING_SNAKE_CASE ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: lowerCAmelCase__ :int = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: lowerCAmelCase__ :Dict = i total_time += burst_time[target_process] completed += 1 lowerCAmelCase__ :Tuple = 0 lowerCAmelCase__ :List[str] = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]: """simple docstring""" lowerCAmelCase__ :Optional[Any] = [0] * no_of_processes for i in range(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :str = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") __A = 4 __A = [2, 5, 3, 7] __A = [0, 0, 0, 0] __A = calculate_waitingtime(arrival_time, burst_time, no_of_processes) __A = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t''' F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}''' ) print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''') print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __A = TypeVar("""KEY""") __A = TypeVar("""VAL""") @dataclass(frozen=a , slots=a ) class _lowerCAmelCase ( Generic[KEY, VAL] ): """simple docstring""" __magic_name__ :KEY __magic_name__ :VAL class _lowerCAmelCase ( _Item ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __bool__( self ): '''simple docstring''' return False __A = _DeletedItem() class _lowerCAmelCase ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.75 ): '''simple docstring''' lowerCAmelCase__ :List[str] = initial_block_size lowerCAmelCase__ :list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ :Tuple = capacity_factor lowerCAmelCase__ :str = 0 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return hash(__UpperCAmelCase ) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = self._buckets[ind] if not stored: lowerCAmelCase__ :Dict = _Item(__UpperCAmelCase , __UpperCAmelCase ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ :Optional[Any] = _Item(__UpperCAmelCase , __UpperCAmelCase ) return True else: return False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ :Optional[Any] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self._buckets lowerCAmelCase__ :Tuple = [None] * new_size lowerCAmelCase__ :List[Any] = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self._get_bucket_index(__UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ :Tuple = self._get_next_ind(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__UpperCAmelCase , __UpperCAmelCase ) def __delitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :int = self._buckets[ind] if item is None: raise KeyError(__UpperCAmelCase ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ :List[str] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :str = 
self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCAmelCase ) def __len__( self ): '''simple docstring''' return self._len def __iter__( self ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ' ,'.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return F"HashMap({val_string})"
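A usage sketch exercising insert, overwrite, lazy delete, and the automatic resize:

hm = HashMap(initial_block_size=8)
for i in range(20):
    hm[f"key{i}"] = i        # crossing the 0.75 load factor triggers _size_up
hm["key0"] = 100             # overwrite in place, length unchanged
del hm["key1"]               # replaced by the _deleted sentinel
print(len(hm), hm["key0"])   # 19 100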
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def __A (_SCREAMING_SNAKE_CASE ) ->Tuple: """simple docstring""" lowerCAmelCase__ :List[str] = np.max(_outputs , axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Any = """sigmoid""" __magic_name__ :Optional[Any] = """softmax""" __magic_name__ :Optional[Any] = """none""" @add_end_docstrings( a , r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. """ , ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Union[str, Any] = False __magic_name__ :Dict = ClassificationFunction.NONE def __init__( self , **__UpperCAmelCase ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = tokenizer_kwargs lowerCAmelCase__ :List[Any] = {} if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None: lowerCAmelCase__ :List[Any] = self.model.config.return_all_scores if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or top_k is None: lowerCAmelCase__ :int = top_k lowerCAmelCase__ :Dict = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , __UpperCAmelCase , ) if return_all_scores: lowerCAmelCase__ :List[Any] = None else: lowerCAmelCase__ :Union[str, Any] = 1 if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowerCAmelCase__ :List[Any] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
lowerCAmelCase__ :Optional[Any] = 'top_k' not in kwargs if isinstance(args[0] , __UpperCAmelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = self.framework if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.tokenizer(**__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1 and isinstance(inputs[0] , __UpperCAmelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.model(**__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowerCAmelCase__ :str = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowerCAmelCase__ :int = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None: lowerCAmelCase__ :Optional[Any] = self.model.config.function_to_apply else: lowerCAmelCase__ :Dict = ClassificationFunction.NONE lowerCAmelCase__ :int = model_outputs['logits'][0] lowerCAmelCase__ :Union[str, Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowerCAmelCase__ :Dict = sigmoid(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowerCAmelCase__ :int = softmax(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.NONE: lowerCAmelCase__ :Tuple = outputs else: raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowerCAmelCase__ :Any = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(__UpperCAmelCase ) ] if not _legacy: dict_scores.sort(key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase ) if top_k is not None: lowerCAmelCase__ :List[str] = dict_scores[:top_k] return dict_scores
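A minimal end-to-end sketch of this pipeline through the high-level factory; the default model id is chosen by the task, and weights may be downloaded on first use:

from transformers import pipeline

classifier = pipeline("text-classification")
print(classifier("I love this movie!"))
# e.g. [{'label': 'POSITIVE', 'score': 0.999...}]
print(classifier("It was fine, I guess.", top_k=None))  # scores for every label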
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __A = logging.getLogger(__name__) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" if os.path.exists(_SCREAMING_SNAKE_CASE ): if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) else: os.makedirs(_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :Dict = 2 if unlogit: lowerCAmelCase__ :List[str] = torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :str = p * torch.log(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = 0 return -plogp.sum(dim=-1 ) def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" logger.info('lv, h >\t' + '\t'.join(F"{x + 1}" for x in range(len(_SCREAMING_SNAKE_CASE ) ) ) ) for row in range(len(_SCREAMING_SNAKE_CASE ) ): if tensor.dtype != torch.long: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:.5f}" for x in tensor[row].cpu().data ) ) else: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:d}" for x in tensor[row].cpu().data ) ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ :Dict = model.config.num_hidden_layers, model.config.num_attention_heads lowerCAmelCase__ :Any = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) lowerCAmelCase__ :Tuple = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) if head_mask is None: lowerCAmelCase__ :Optional[int] = torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) head_mask.requires_grad_(requires_grad=_SCREAMING_SNAKE_CASE ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Any = 0.0 for step, inputs in enumerate(tqdm(_SCREAMING_SNAKE_CASE , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): lowerCAmelCase__ :str = tuple(t.to(args.device ) for t in inputs ) ((lowerCAmelCase__) , ) :Dict = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCAmelCase__ :str = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in 
enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = entropy(attn.detach() , _SCREAMING_SNAKE_CASE ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_SCREAMING_SNAKE_CASE ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: lowerCAmelCase__ :Union[str, Any] = 2 lowerCAmelCase__ :Tuple = torch.pow(torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: lowerCAmelCase__ :str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) logger.info('Head ranked by importance scores' ) lowerCAmelCase__ :List[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCAmelCase__ :List[Any] = torch.arange( head_importance.numel() , device=args.device ) lowerCAmelCase__ :int = head_ranks.view_as(_SCREAMING_SNAKE_CASE ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) return attn_entropy, head_importance, total_loss def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , _SCREAMING_SNAKE_CASE , original_score * args.masking_threshold ) lowerCAmelCase__ :Optional[int] = torch.ones_like(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCAmelCase__ :List[str] = original_score while current_score >= original_score * args.masking_threshold: lowerCAmelCase__ :List[str] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCAmelCase__ :str = float('Inf' ) lowerCAmelCase__ :List[str] = head_importance.view(-1 ).sort()[1] if len(_SCREAMING_SNAKE_CASE ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads lowerCAmelCase__ :int = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) lowerCAmelCase__ :Dict = new_head_mask.view(-1 ) lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Tuple = new_head_mask.view_as(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[int] = new_head_mask.clone().detach() print_ad_tensor(_SCREAMING_SNAKE_CASE ) # Compute metric and head importance again lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , _SCREAMING_SNAKE_CASE , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) 
logger.info('Final head mask' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time lowerCAmelCase__ :List[str] = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :List[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_SCREAMING_SNAKE_CASE ) ) } for k, v in heads_to_prune.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = [ v, ] assert sum(len(_SCREAMING_SNAKE_CASE ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :int = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , actually_pruned=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ :int = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(_SCREAMING_SNAKE_CASE , args.output_dir ) def __A () ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' 
, ) # Other parameters parser.add_argument( '--config_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=_SCREAMING_SNAKE_CASE , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=_SCREAMING_SNAKE_CASE , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=_SCREAMING_SNAKE_CASE , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=_SCREAMING_SNAKE_CASE , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=_SCREAMING_SNAKE_CASE , help='Batch size.' ) parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 ) parser.add_argument('--local_rank' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' 
) lowerCAmelCase__ :Any = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCAmelCase__ :List[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) lowerCAmelCase__ :Optional[int] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCAmelCase__ :Dict = torch.device('cuda' , args.local_rank ) lowerCAmelCase__ :Tuple = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCAmelCase__ :int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCAmelCase__ :Optional[Any] = nn.parallel.DistributedDataParallel( _SCREAMING_SNAKE_CASE , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_SCREAMING_SNAKE_CASE ) elif args.n_gpu > 1: lowerCAmelCase__ :Union[str, Any] = nn.DataParallel(_SCREAMING_SNAKE_CASE ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE ) # Prepare dataset lowerCAmelCase__ :Optional[int] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCAmelCase__ :Union[str, Any] = (torch.from_numpy(_SCREAMING_SNAKE_CASE ),) lowerCAmelCase__ :Optional[int] = TensorDataset(*_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = RandomSampler(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCAmelCase__ :Optional[Any] = mask_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) prune_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
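A hypothetical command line for the script above; the filename is a placeholder, every flag exists in the argparse block, and `--data_dir` must point at a file of token ids readable by np.loadtxt:

# python prune_gpt2_heads.py \
#     --data_dir ./token_ids.txt \
#     --model_name_or_path gpt2 \
#     --output_dir ./pruned_gpt2 \
#     --try_masking --masking_threshold 0.9 --masking_amount 0.1 \
#     --batch_size 4 --seed 42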
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __A = Lock() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]: """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowerCAmelCase__ :Any = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowerCAmelCase__ :Tuple = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowerCAmelCase__ :Optional[int] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowerCAmelCase__ :Optional[int] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # after all swaps are performed, send the values back to main result_pipe[1].send(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :str = [] lowerCAmelCase__ :Optional[Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowerCAmelCase__ :List[str] = Pipe() lowerCAmelCase__ :List[Any] = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowerCAmelCase__ :Dict = temp_rs lowerCAmelCase__ :Optional[Any] = temp_rr for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ): lowerCAmelCase__ :Union[str, Any] = Pipe() lowerCAmelCase__ :List[str] = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowerCAmelCase__ :Union[str, Any] = temp_rs lowerCAmelCase__ :Any = temp_rr process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=( len(_SCREAMING_SNAKE_CASE ) - 1, arr[len(_SCREAMING_SNAKE_CASE ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ): lowerCAmelCase__ :str = result_pipe[p][0].recv() process_array_[p].join() return arr def __A () ->List[Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = odd_even_transposition(_SCREAMING_SNAKE_CASE ) print('Sorted List\n' ) print(*_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = 1_0 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [1, 2, 3, 4] lowerCAmelCase__ :Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = '' lowerCAmelCase__ , lowerCAmelCase__ :Any = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) lowerCAmelCase__ , lowerCAmelCase__ :str = process_story(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[str] = ['It was the best of times.'] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = torch.tensor([1, 2, 3, 4] ) lowerCAmelCase__ :List[str] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 0 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowerCAmelCase__ :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 2_3 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowerCAmelCase__ :Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 1 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 1_0_1 lowerCAmelCase__ :str = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowerCAmelCase__ :Any = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 
1, 1]] ) lowerCAmelCase__ :List[Any] = compute_token_type_ids(__UpperCAmelCase , __UpperCAmelCase ) np.testing.assert_array_equal(__UpperCAmelCase , __UpperCAmelCase )
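The three `truncate_or_pad` tests above pin down the helper's contract without showing it; a plausible implementation consistent with those cases is sketched below (the real helper lives in `utils_summarization`, so treat this as an illustration).

def truncate_or_pad(sequence: list, block_size: int, pad_token_id: int) -> list:
    """Return exactly `block_size` items: cut if too long, right-pad if too short."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))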
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any: """simple docstring""" print('\nThe shortest path matrix using Floyd Warshall algorithm\n' ) for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): if dist[i][j] != float('inf' ): print(int(dist[i][j] ) , end='\t' ) else: print('INF' , end='\t' ) print() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]: """simple docstring""" lowerCAmelCase__ :int = [[float('inf' ) for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :List[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(_SCREAMING_SNAKE_CASE ): # looping through rows of graph array for i in range(_SCREAMING_SNAKE_CASE ): # looping through columns of graph array for j in range(_SCREAMING_SNAKE_CASE ): if ( dist[i][k] != float('inf' ) and dist[k][j] != float('inf' ) and dist[i][k] + dist[k][j] < dist[i][j] ): lowerCAmelCase__ :Any = dist[i][k] + dist[k][j] _print_dist(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return dist, v if __name__ == "__main__": __A = int(input("""Enter number of vertices: """)) __A = int(input("""Enter number of edges: """)) __A = [[float("""inf""") for i in range(v)] for j in range(v)] for i in range(v): __A = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print("""\nEdge """, i + 1) __A = int(input("""Enter source:""")) __A = int(input("""Enter destination:""")) __A = float(input("""Enter weight:""")) __A = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 'hf-internal-testing/tiny-random-t5' lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Any = tokenizer('This is me' , return_tensors='pt' ) lowerCAmelCase__ :Dict = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) lowerCAmelCase__ :Optional[Any] = model.generate(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Any = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) lowerCAmelCase__ :Union[str, Any] = model_reloaded.generate(**__UpperCAmelCase ) self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = 'hf-internal-testing/tiny-random-t5' lowerCAmelCase__ :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__UpperCAmelCase ): model.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = model.reverse_bettertransformer() model.save_pretrained(__UpperCAmelCase )
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __A () ->Tuple: """simple docstring""" lowerCAmelCase__ :Tuple = HfArgumentParser(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = parser.parse_args_into_dataclasses()[0] lowerCAmelCase__ :int = TensorFlowBenchmark(args=_SCREAMING_SNAKE_CASE ) try: lowerCAmelCase__ :Tuple = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowerCAmelCase__ :Optional[Any] = 'Arg --no_{0} is no longer used, please use --no-{0} instead.' lowerCAmelCase__ :int = ' '.join(str(_SCREAMING_SNAKE_CASE ).split(' ' )[:-1] ) lowerCAmelCase__ :Optional[Any] = '' lowerCAmelCase__ :Optional[int] = eval(str(_SCREAMING_SNAKE_CASE ).split(' ' )[-1] ) lowerCAmelCase__ :Optional[int] = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: lowerCAmelCase__ :str = full_error_msg + begin_error_msg + str(_SCREAMING_SNAKE_CASE ) raise ValueError(_SCREAMING_SNAKE_CASE ) benchmark.run() if __name__ == "__main__": main()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] __A = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] __A = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): __A = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Union[List[PIL.Image.Image], np.ndarray] __magic_name__ :Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version(""">=""", """0.0.12""") ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import 
StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :np.ndarray __magic_name__ :List[bool] from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowerCAmelCase ( yaml.SafeLoader ): """simple docstring""" def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase__ :str = [tuple(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else key for key in keys] lowerCAmelCase__ :Optional[int] = Counter(__UpperCAmelCase ) lowerCAmelCase__ :int = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().construct_mapping(__UpperCAmelCase , deep=__UpperCAmelCase ) self._check_no_duplicates_on_constructed_node(__UpperCAmelCase ) return mapping def __A (_SCREAMING_SNAKE_CASE ) ->Tuple[Optional[str], str]: """simple docstring""" lowerCAmelCase__ :Optional[Any] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase__ :Optional[int] = full_content[1:].index('---' ) + 1 lowerCAmelCase__ :Union[str, Any] = '\n'.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :List[str] = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(__UpperCAmelCase ) else: return cls() def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path.exists(): with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ :Optional[Any] = readme_file.read() else: lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :Union[str, Any] = self._to_readme(__UpperCAmelCase ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as readme_file: readme_file.write(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase = None ): '''simple docstring''' if readme_content is not None: lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = _split_yaml_from_readme(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content else: lowerCAmelCase__ :str = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = yaml.load(__UpperCAmelCase , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase__ :int = { (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return yaml.safe_dump( { (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=__UpperCAmelCase , allow_unicode=__UpperCAmelCase , encoding='utf-8' , ).decode('utf-8' ) __A = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], 
"""automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser __A = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") __A = ap.parse_args() __A = Path(args.readme_filepath) __A = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
"""simple docstring""" from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0.0 , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = "layer_norm" , __UpperCAmelCase = False , ): '''simple docstring''' super().__init__() lowerCAmelCase__ :Dict = only_cross_attention lowerCAmelCase__ :Any = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero' lowerCAmelCase__ :Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: lowerCAmelCase__ :Optional[Any] = AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase ) elif self.use_ada_layer_norm_zero: lowerCAmelCase__ :List[str] = AdaLayerNormZero(__UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ :Dict = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = Attention( query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__UpperCAmelCase , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. lowerCAmelCase__ :Dict = ( AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase ) if self.use_ada_layer_norm else nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase ) ) lowerCAmelCase__ :List[Any] = Attention( query_dim=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , upcast_attention=__UpperCAmelCase , ) # is self-attn if encoder_hidden_states is none else: lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :List[Any] = None # 3. 
Feed-forward lowerCAmelCase__ :Union[str, Any] = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = FeedForward(__UpperCAmelCase , dropout=__UpperCAmelCase , activation_fn=__UpperCAmelCase , final_dropout=__UpperCAmelCase ) # let chunk size default to None lowerCAmelCase__ :Tuple = None lowerCAmelCase__ :Dict = 0 def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = chunk_size lowerCAmelCase__ :Optional[int] = dim def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ): '''simple docstring''' if self.use_ada_layer_norm: lowerCAmelCase__ :Tuple = self.norma(__UpperCAmelCase , __UpperCAmelCase ) elif self.use_ada_layer_norm_zero: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = self.norma( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hidden_dtype=hidden_states.dtype ) else: lowerCAmelCase__ :Optional[int] = self.norma(__UpperCAmelCase ) lowerCAmelCase__ :Any = cross_attention_kwargs if cross_attention_kwargs is not None else {} lowerCAmelCase__ :Optional[int] = self.attna( __UpperCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) if self.use_ada_layer_norm_zero: lowerCAmelCase__ :Any = gate_msa.unsqueeze(1 ) * attn_output lowerCAmelCase__ :Dict = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: lowerCAmelCase__ :List[Any] = ( self.norma(__UpperCAmelCase , __UpperCAmelCase ) if self.use_ada_layer_norm else self.norma(__UpperCAmelCase ) ) lowerCAmelCase__ :List[Any] = self.attna( __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) lowerCAmelCase__ :Union[str, Any] = attn_output + hidden_states # 3. Feed-forward lowerCAmelCase__ :str = self.norma(__UpperCAmelCase ) if self.use_ada_layer_norm_zero: lowerCAmelCase__ :List[str] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." 
) lowerCAmelCase__ :Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size lowerCAmelCase__ :Dict = torch.cat( [self.ff(__UpperCAmelCase ) for hid_slice in norm_hidden_states.chunk(__UpperCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: lowerCAmelCase__ :str = self.ff(__UpperCAmelCase ) if self.use_ada_layer_norm_zero: lowerCAmelCase__ :Tuple = gate_mlp.unsqueeze(1 ) * ff_output lowerCAmelCase__ :List[Any] = ff_output + hidden_states return hidden_states class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = "geglu" , __UpperCAmelCase = False , ): '''simple docstring''' super().__init__() lowerCAmelCase__ :Any = int(dim * mult ) lowerCAmelCase__ :str = dim_out if dim_out is not None else dim if activation_fn == "gelu": lowerCAmelCase__ :Optional[Any] = GELU(__UpperCAmelCase , __UpperCAmelCase ) if activation_fn == "gelu-approximate": lowerCAmelCase__ :Union[str, Any] = GELU(__UpperCAmelCase , __UpperCAmelCase , approximate='tanh' ) elif activation_fn == "geglu": lowerCAmelCase__ :Optional[Any] = GEGLU(__UpperCAmelCase , __UpperCAmelCase ) elif activation_fn == "geglu-approximate": lowerCAmelCase__ :Union[str, Any] = ApproximateGELU(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = nn.ModuleList([] ) # project in self.net.append(__UpperCAmelCase ) # project dropout self.net.append(nn.Dropout(__UpperCAmelCase ) ) # project out self.net.append(nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(__UpperCAmelCase ) ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' for module in self.net: lowerCAmelCase__ :Optional[int] = module(__UpperCAmelCase ) return hidden_states class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "none" ): '''simple docstring''' super().__init__() lowerCAmelCase__ :str = nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[str] = approximate def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(__UpperCAmelCase , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.proj(__UpperCAmelCase ) lowerCAmelCase__ :Dict = self.gelu(__UpperCAmelCase ) return hidden_states class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() lowerCAmelCase__ :int = nn.Linear(__UpperCAmelCase , dim_out * 2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(__UpperCAmelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :int = self.proj(__UpperCAmelCase ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(__UpperCAmelCase ) class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' 
super().__init__() lowerCAmelCase__ :Dict = nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.proj(__UpperCAmelCase ) return x * torch.sigmoid(1.7_02 * x ) class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() lowerCAmelCase__ :List[str] = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[Any] = nn.SiLU() lowerCAmelCase__ :Union[str, Any] = nn.Linear(__UpperCAmelCase , embedding_dim * 2 ) lowerCAmelCase__ :List[Any] = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.linear(self.silu(self.emb(__UpperCAmelCase ) ) ) lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = torch.chunk(__UpperCAmelCase , 2 ) lowerCAmelCase__ :Union[str, Any] = self.norm(__UpperCAmelCase ) * (1 + scale) + shift return x class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() lowerCAmelCase__ :int = CombinedTimestepLabelEmbeddings(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = nn.SiLU() lowerCAmelCase__ :Optional[int] = nn.Linear(__UpperCAmelCase , 6 * embedding_dim , bias=__UpperCAmelCase ) lowerCAmelCase__ :int = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase , eps=1E-6 ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.linear(self.silu(self.emb(__UpperCAmelCase , __UpperCAmelCase , hidden_dtype=__UpperCAmelCase ) ) ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = emb.chunk(6 , dim=1 ) lowerCAmelCase__ :Any = self.norm(__UpperCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 1E-5 ): '''simple docstring''' super().__init__() lowerCAmelCase__ :Any = num_groups lowerCAmelCase__ :str = eps if act_fn is None: lowerCAmelCase__ :List[Any] = None else: lowerCAmelCase__ :Optional[Any] = get_activation(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = nn.Linear(__UpperCAmelCase , out_dim * 2 ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self.act: lowerCAmelCase__ :Optional[Any] = self.act(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = self.linear(__UpperCAmelCase ) lowerCAmelCase__ :Dict = emb[:, :, None, None] lowerCAmelCase__ , lowerCAmelCase__ :Tuple = emb.chunk(2 , dim=1 ) lowerCAmelCase__ :List[Any] = F.group_norm(__UpperCAmelCase , self.num_groups , eps=self.eps ) lowerCAmelCase__ :Union[str, Any] = x * (1 + scale) + shift return x
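The `_chunk_size` branch in the transformer block above works because the feed-forward acts on each sequence position independently, so applying it to slices and concatenating is mathematically identical to one big call; only peak activation memory changes. A self-contained check of that equivalence:

import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))
x = torch.randn(2, 10, 8)  # (batch, seq_len, dim)

chunk_size, chunk_dim = 5, 1
assert x.shape[chunk_dim] % chunk_size == 0  # same divisibility check as above
num_chunks = x.shape[chunk_dim] // chunk_size
chunked = torch.cat(
    [ff(piece) for piece in x.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim
)

# identical to the unchunked result, with smaller peak activation memory
assert torch.allclose(chunked, ff(x), atol=1e-6)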
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __A = logging.getLogger(__name__) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ): '''simple docstring''' super().__init__( __UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , ) lowerCAmelCase__ :str = None def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' logger.info('initializing retrieval' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('dist initialized' ) # needs to be set manually lowerCAmelCase__ :Optional[Any] = self._infer_socket_ifname() # avoid clash with the NCCL port lowerCAmelCase__ :Dict = str(distributed_port + 1 ) lowerCAmelCase__ :List[Any] = dist.new_group(ranks=__UpperCAmelCase , backend='gloo' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('dist not initialized / main' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def snake_case ( self ): '''simple docstring''' return dist.get_rank(group=self.process_group ) == 0 def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=torch.floataa ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = torch.empty(__UpperCAmelCase , dtype=__UpperCAmelCase ) dist.scatter(__UpperCAmelCase , src=0 , scatter_list=__UpperCAmelCase , group=self.process_group ) return target_tensor def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = psutil.net_if_addrs() # a hacky way to deal with varying network interface names lowerCAmelCase__ :Tuple = next((addr for addr in addrs if addr.startswith('e' )) , __UpperCAmelCase ) return ifname def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if not dist.is_initialized(): lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase ) # distributed training lowerCAmelCase__ :List[str] = dist.get_world_size(group=self.process_group ) # gather logic lowerCAmelCase__ :Any = None if self._is_main(): lowerCAmelCase__ :Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__UpperCAmelCase )] dist.gather(torch.tensor(__UpperCAmelCase ) , dst=0 , gather_list=__UpperCAmelCase , group=self.process_group ) # scatter logic lowerCAmelCase__ :Optional[int] = question_hidden_states.shape[0] lowerCAmelCase__ :Tuple = [] lowerCAmelCase__ :str = [] if self._is_main(): assert len(__UpperCAmelCase ) == world_size lowerCAmelCase__ , lowerCAmelCase__ :Any = self._main_retrieve(torch.cat(__UpperCAmelCase ).numpy() , __UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ :str = torch.tensor(__UpperCAmelCase ), torch.tensor(__UpperCAmelCase ) lowerCAmelCase__ :Dict = self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Tuple = 
self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[str] = self._scattered(__UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa ) lowerCAmelCase__ :Optional[int] = self._scattered(__UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__UpperCAmelCase )
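The distributed branch above reduces to one pattern: rank 0 concatenates every worker's query vectors, retrieves once, then splits the results back into per-worker chunks for `dist.scatter`. The splitting step can be exercised without a process group; the shapes below are illustrative, not the retriever's real dimensions.

import torch


def chunk_for_scatter(tensor: torch.Tensor, num_workers: int):
    """Split along dim 0 into one chunk per worker, usable as a scatter_list."""
    return list(tensor.chunk(num_workers, dim=0))


# rank 0 retrieved doc embeddings for all workers' queries at once:
all_doc_embeds = torch.randn(8, 5, 768)  # (total_queries, n_docs, hidden)
per_worker = chunk_for_scatter(all_doc_embeds, num_workers=4)
print([t.shape for t in per_worker])  # 4 chunks of (2, 5, 768)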
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __A = logging.getLogger(__name__) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase=-1 ): '''simple docstring''' lowerCAmelCase__ :Dict = label_idx def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Optional[Any] = mode.value lowerCAmelCase__ :List[str] = os.path.join(__UpperCAmelCase , F"{mode}.txt" ) lowerCAmelCase__ :List[str] = 1 lowerCAmelCase__ :Union[str, Any] = [] with open(__UpperCAmelCase , encoding='utf-8' ) as f: lowerCAmelCase__ :str = [] lowerCAmelCase__ :Dict = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 lowerCAmelCase__ :Tuple = [] lowerCAmelCase__ :List[str] = [] else: lowerCAmelCase__ :List[str] = line.split(' ' ) words.append(splits[0] ) if len(__UpperCAmelCase ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) return examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(__UpperCAmelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowerCAmelCase__ :Optional[Any] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(__UpperCAmelCase ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' 
, line.split()[0] ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: lowerCAmelCase__ :Any = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ :Union[str, Any] = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(label_idx=-2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: lowerCAmelCase__ :str = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ :Optional[Any] = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _lowerCAmelCase ( a ): """simple docstring""" def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = mode.value lowerCAmelCase__ :Union[str, Any] = os.path.join(__UpperCAmelCase , F"{mode}.txt" ) lowerCAmelCase__ :Any = 1 lowerCAmelCase__ :Optional[Any] = [] with open(__UpperCAmelCase , encoding='utf-8' ) as f: for sentence in parse_incr(__UpperCAmelCase ): lowerCAmelCase__ :Dict = [] lowerCAmelCase__ :Dict = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 return examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = 0 for sentence in parse_incr(__UpperCAmelCase ): lowerCAmelCase__ :Optional[int] = preds_list[example_id] lowerCAmelCase__ :Tuple = '' for token in sentence: out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(__UpperCAmelCase ) example_id += 1 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
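The readers above share one parsing loop: each non-blank line of a CoNLL-style file is `token col1 col2 ...`, the label comes from a fixed column (`label_idx`), and blank lines or `-DOCSTART-` markers close a sentence. A compact, self-contained sketch of that loop over an inline example:

def read_conll(text: str, label_idx: int = -1):
    """Parse CoNLL-style text into (words, labels) pairs, one per sentence."""
    sentences, words, labels = [], [], []
    for line in text.splitlines():
        if line.startswith("-DOCSTART-") or not line.strip():
            if words:
                sentences.append((words, labels))
                words, labels = [], []
            continue
        parts = line.split(" ")
        words.append(parts[0])
        labels.append(parts[label_idx] if len(parts) > 1 else "O")
    if words:
        sentences.append((words, labels))
    return sentences


sample = "EU B-ORG\nrejects O\nGerman B-MISC\n\nPeter B-PER\nBlackburn I-PER\n"
print(read_conll(sample))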
"""simple docstring""" import functools def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" lowerCAmelCase__ :int = len(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :int = len(_SCREAMING_SNAKE_CASE ) @functools.cache def min_distance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowerCAmelCase__ :Dict = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , _SCREAMING_SNAKE_CASE ) , 1 + min_distance(_SCREAMING_SNAKE_CASE , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations __A = tuple[int, int, int] __A = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase __A = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" # -------------------------- default selection -------------------------- # rotors -------------------------- __A = """EGZWVONAHDCLFQMSIPJBYUKXTR""" __A = """FOBHMDKEXQNRAULPGSJVTYICZW""" __A = """ZJXESIUQLHAVRMDOYGTNFWPBKC""" # reflector -------------------------- __A = { """A""": """N""", """N""": """A""", """B""": """O""", """O""": """B""", """C""": """P""", """P""": """C""", """D""": """Q""", """Q""": """D""", """E""": """R""", """R""": """E""", """F""": """S""", """S""": """F""", """G""": """T""", """T""": """G""", """H""": """U""", """U""": """H""", """I""": """V""", """V""": """I""", """J""": """W""", """W""": """J""", """K""": """X""", """X""": """K""", """L""": """Y""", """Y""": """L""", """M""": """Z""", """Z""": """M""", } # -------------------------- extra rotors -------------------------- __A = """RMDJXFUWGISLHVTCQNKYPBEZOA""" __A = """SGLCPQWZHKXAREONTFBVIYJUDM""" __A = """HVSICLTYKQUBXDWAJZOMFGPREN""" __A = """RZWQHFMVDBKICJLNTUXAGYPSOE""" __A = """LFKIJODBEGAMQPXVUHYSTCZRWN""" __A = """KOAEGVDHXPQZMLFTYWJNBRCIUS""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: """simple docstring""" if (unique_rotsel := len(set(_SCREAMING_SNAKE_CASE ) )) < 3: lowerCAmelCase__ :Union[str, Any] = F"Please use 3 unique rotors (not {unique_rotsel})" raise Exception(_SCREAMING_SNAKE_CASE ) # Checks if rotor positions are valid lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = rotpos if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Tuple = F"First rotor position is not within range of 1..26 ({rotorposa}" raise ValueError(_SCREAMING_SNAKE_CASE ) if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = F"Second rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_SCREAMING_SNAKE_CASE ) if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_SCREAMING_SNAKE_CASE ) # Validates string and returns dict lowerCAmelCase__ :int = _plugboard(_SCREAMING_SNAKE_CASE ) return rotpos, rotsel, pbdict def __A (_SCREAMING_SNAKE_CASE ) ->dict[str, str]: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :str = F"Plugboard setting isn't type string ({type(_SCREAMING_SNAKE_CASE )})" raise TypeError(_SCREAMING_SNAKE_CASE ) elif len(_SCREAMING_SNAKE_CASE ) % 2 != 0: lowerCAmelCase__ :str = F"Odd number of symbols ({len(_SCREAMING_SNAKE_CASE )})" raise Exception(_SCREAMING_SNAKE_CASE ) elif pbstring == "": return {} pbstring.replace(' ' , '' ) # Checks if all characters are unique lowerCAmelCase__ :Any = set() for i in pbstring: if i not in abc: lowerCAmelCase__ :Any = F"'{i}' not in list of symbols" raise Exception(_SCREAMING_SNAKE_CASE ) elif i in tmppbl: lowerCAmelCase__ :Dict = F"Duplicate symbol ({i})" raise Exception(_SCREAMING_SNAKE_CASE ) else: tmppbl.add(_SCREAMING_SNAKE_CASE ) del tmppbl # Created the dictionary lowerCAmelCase__ :List[Any] = {} for j in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 , 2 ): lowerCAmelCase__ :Optional[int] = pbstring[j + 1] lowerCAmelCase__ :Union[str, Any] = pbstring[j] return pb def __A (_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , _SCREAMING_SNAKE_CASE = "" , ) ->str: """simple docstring""" lowerCAmelCase__ :Tuple = text.upper() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = _validator( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , plugb.upper() ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = rotor_position lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 lowerCAmelCase__ :Dict = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: lowerCAmelCase__ :Dict = plugboard[symbol] # rotor ra -------------------------- lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :str = rotora[index % len(_SCREAMING_SNAKE_CASE )] # rotor rb -------------------------- lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :int = rotora[index % len(_SCREAMING_SNAKE_CASE )] # rotor rc -------------------------- lowerCAmelCase__ :str = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :Optional[Any] = rotora[index % len(_SCREAMING_SNAKE_CASE )] # reflector -------------------------- # this is the reason you don't need another machine to decipher lowerCAmelCase__ :str = reflector[symbol] # 2nd rotors lowerCAmelCase__ :Tuple = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] lowerCAmelCase__ :Optional[int] = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] lowerCAmelCase__ :Any = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] # 2nd plugboard if symbol in plugboard: lowerCAmelCase__ :Union[str, Any] = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :str = 0 rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :List[Any] = 0 rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(_SCREAMING_SNAKE_CASE ) return "".join(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = """This is my Python script that emulates the Enigma machine from WWII.""" __A = (1, 1, 1) __A = """pictures""" __A = (rotora, rotora, rotora) __A = enigma(message, rotor_pos, rotor_sel, pb) print("""Encrypted message:""", en) print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def __A (_SCREAMING_SNAKE_CASE ) ->Tuple: """simple docstring""" lowerCAmelCase__ :List[str] = np.max(_outputs , axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Any = """sigmoid""" __magic_name__ :Optional[Any] = """softmax""" __magic_name__ :Optional[Any] = """none""" @add_end_docstrings( a , r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. """ , ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Union[str, Any] = False __magic_name__ :Dict = ClassificationFunction.NONE def __init__( self , **__UpperCAmelCase ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = tokenizer_kwargs lowerCAmelCase__ :List[Any] = {} if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None: lowerCAmelCase__ :List[Any] = self.model.config.return_all_scores if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or top_k is None: lowerCAmelCase__ :int = top_k lowerCAmelCase__ :Dict = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , __UpperCAmelCase , ) if return_all_scores: lowerCAmelCase__ :List[Any] = None else: lowerCAmelCase__ :Union[str, Any] = 1 if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowerCAmelCase__ :List[Any] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
lowerCAmelCase__ :Optional[Any] = 'top_k' not in kwargs if isinstance(args[0] , __UpperCAmelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = self.framework if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.tokenizer(**__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1 and isinstance(inputs[0] , __UpperCAmelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.model(**__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowerCAmelCase__ :str = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowerCAmelCase__ :int = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None: lowerCAmelCase__ :Optional[Any] = self.model.config.function_to_apply else: lowerCAmelCase__ :Dict = ClassificationFunction.NONE lowerCAmelCase__ :int = model_outputs['logits'][0] lowerCAmelCase__ :Union[str, Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowerCAmelCase__ :Dict = sigmoid(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowerCAmelCase__ :int = softmax(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.NONE: lowerCAmelCase__ :Tuple = outputs else: raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowerCAmelCase__ :Any = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(__UpperCAmelCase ) ] if not _legacy: dict_scores.sort(key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase ) if top_k is not None: lowerCAmelCase__ :List[str] = dict_scores[:top_k] return dict_scores
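# A minimal usage sketch for the pipeline above (not part of the original module).
# The checkpoint name is an assumption; any sequence-classification model on the Hub
# behaves the same, and top_k=None takes the non-legacy path that returns every label
# sorted by score.
from transformers import pipeline

classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
print(classifier("This movie was great!"))              # legacy path: [{'label': ..., 'score': ...}]
print(classifier("This movie was great!", top_k=None))  # all labels, sorted by score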
293
1
"""simple docstring""" from __future__ import annotations __A = 1.6_021e-19 # units = C def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->tuple[str, float]: """simple docstring""" if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif conductivity < 0: raise ValueError('Conductivity cannot be negative' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative' ) elif mobility < 0: raise ValueError('mobility cannot be negative' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
293
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float: """simple docstring""" if discount_rate < 0: raise ValueError('Discount rate cannot be negative' ) if not cash_flows: raise ValueError('Cash flows list cannot be empty' ) lowerCAmelCase__ :Union[str, Any] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_SCREAMING_SNAKE_CASE ) ) return round(_SCREAMING_SNAKE_CASE , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
293
1
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class _lowerCAmelCase : """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ): '''simple docstring''' lowerCAmelCase__ :Tuple = parent lowerCAmelCase__ :List[str] = batch_size lowerCAmelCase__ :List[Any] = seq_length lowerCAmelCase__ :str = is_training lowerCAmelCase__ :Dict = use_input_mask lowerCAmelCase__ :Union[str, Any] = vocab_size lowerCAmelCase__ :Union[str, Any] = hidden_size lowerCAmelCase__ :Optional[Any] = num_hidden_layers lowerCAmelCase__ :List[str] = num_attention_heads lowerCAmelCase__ :Optional[Any] = intermediate_size lowerCAmelCase__ :Union[str, Any] = hidden_act lowerCAmelCase__ :int = hidden_dropout_prob lowerCAmelCase__ :int = attention_probs_dropout_prob lowerCAmelCase__ :Union[str, Any] = max_position_embeddings lowerCAmelCase__ :Union[str, Any] = initializer_range lowerCAmelCase__ :List[Any] = use_labels lowerCAmelCase__ :List[Any] = scope def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ :Tuple = None if self.use_input_mask: lowerCAmelCase__ :Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowerCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ :Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def snake_case ( self ): '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def snake_case ( self ): '''simple docstring''' ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) :Optional[Any] = self.prepare_config_and_inputs() lowerCAmelCase__ :Dict = True lowerCAmelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ 
:Tuple = BertGenerationEncoder(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :str = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) lowerCAmelCase__ :int = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :str = True lowerCAmelCase__ :List[str] = BertGenerationEncoder(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :List[str] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) lowerCAmelCase__ :str = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Tuple = True lowerCAmelCase__ :Union[str, Any] = True lowerCAmelCase__ :Optional[int] = BertGenerationDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() # first forward pass lowerCAmelCase__ :int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) lowerCAmelCase__ :Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase__ :Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase__ :Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase__ :Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase__ :List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCAmelCase__ :Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['hidden_states'][0] lowerCAmelCase__ :int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['hidden_states'][0] # select random slice lowerCAmelCase__ :Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase__ :Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase__ :Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :int = BertGenerationDecoder(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, 
self.vocab_size) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self.prepare_config_and_inputs() lowerCAmelCase__ :Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( a , a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __magic_name__ :Optional[Any] = (BertGenerationDecoder,) if is_torch_available() else () __magic_name__ :Optional[Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = BertGenerationEncoderTester(self ) lowerCAmelCase__ :int = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 ) def snake_case ( self ): '''simple docstring''' self.config_tester.run_common_tests() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ :List[str] = 'bert' self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) :int = self.model_tester.prepare_config_and_inputs_for_decoder() lowerCAmelCase__ :int = None self.model_tester.create_and_check_model_as_decoder( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCAmelCase ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowerCAmelCase__ :Tuple = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): lowerCAmelCase__ :Dict = model(__UpperCAmelCase )[0] lowerCAmelCase__ :Any = torch.Size([1, 8, 1_0_2_4] ) self.assertEqual(output.shape , __UpperCAmelCase ) lowerCAmelCase__ :Tuple = torch.tensor( [[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] ) 
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowerCAmelCase__ :str = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): lowerCAmelCase__ :Any = model(__UpperCAmelCase )[0] lowerCAmelCase__ :Tuple = torch.Size([1, 8, 5_0_3_5_8] ) self.assertEqual(output.shape , __UpperCAmelCase ) lowerCAmelCase__ :Tuple = torch.tensor( [[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
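# A small, hedged sketch of what the tests above exercise, runnable offline: build a
# tiny BertGenerationConfig (sizes are arbitrary) and run a forward pass through the encoder.
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder

config = BertGenerationConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
model = BertGenerationEncoder(config).eval()
with torch.no_grad():
    out = model(torch.randint(0, 99, (1, 8)))
print(out.last_hidden_state.shape)  # torch.Size([1, 8, 32])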
293
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""OwlViTFeatureExtractor"""] __A = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def snake_case ( *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' pass def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __A = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __magic_name__ :str = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = pipeline( 'document-question-answering' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :Dict = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) lowerCAmelCase__ :List[Any] = 'What is the placebo?' lowerCAmelCase__ :Dict = [ { 'image': load_image(__UpperCAmelCase ), 'question': question, }, { 'image': image, 'question': question, }, { 'image': image, 'question': question, 'word_boxes': word_boxes, }, ] return dqa_pipeline, examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = dqa_pipeline(__UpperCAmelCase , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )}, {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' ) lowerCAmelCase__ :Union[str, Any] = INVOICE_URL lowerCAmelCase__ :Tuple = 'How many cats are there?' lowerCAmelCase__ :List[str] = [ {'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9}, {'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0}, ] lowerCAmelCase__ :Any = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) lowerCAmelCase__ :Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably lowerCAmelCase__ :List[Any] = './tests/fixtures/tests_samples/COCO/000000039769.png' lowerCAmelCase__ :List[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) # We can optionnally pass directly the words and bounding boxes lowerCAmelCase__ :Dict = './tests/fixtures/tests_samples/COCO/000000039769.png' lowerCAmelCase__ :List[str] = [] lowerCAmelCase__ :int = [] lowerCAmelCase__ :List[str] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , ) lowerCAmelCase__ :str = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' lowerCAmelCase__ :Tuple = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :Union[str, Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :Dict = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , ) lowerCAmelCase__ :List[Any] = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' 
lowerCAmelCase__ :Optional[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :int = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , ) lowerCAmelCase__ :List[str] = INVOICE_URL lowerCAmelCase__ :Any = 'What is the invoice number?' lowerCAmelCase__ :List[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) lowerCAmelCase__ :Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] ] * 2 , ) lowerCAmelCase__ :Dict = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) # This model should also work if `image` is set to None lowerCAmelCase__ :Tuple = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) @slow @require_torch @require_pytesseract @require_vision def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , max_seq_len=5_0 , ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' 
lowerCAmelCase__ :List[str] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] ] * 2 , ) lowerCAmelCase__ :Optional[Any] = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) # This model should also work if `image` is set to None lowerCAmelCase__ :List[str] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) @slow @require_torch def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = pipeline( 'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :str = 'What is the invoice number?' lowerCAmelCase__ :Tuple = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'answer': 'us-001'}] ) @require_tf @unittest.skip('Document question answering not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass
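# A hedged usage sketch mirroring the slow tests above; it needs pytesseract for OCR
# and downloads real weights, and the image file name is a placeholder.
from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
print(dqa(image="invoice.png", question="What is the invoice number?", top_k=2))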
293
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor __A = logging.get_logger(__name__) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' warnings.warn( 'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DonutImageProcessor instead.' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
293
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :Tuple = StableDiffusionXLImgaImgPipeline __magic_name__ :List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} __magic_name__ :Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} __magic_name__ :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __magic_name__ :str = IMAGE_TO_IMAGE_IMAGE_PARAMS __magic_name__ :Any = IMAGE_TO_IMAGE_IMAGE_PARAMS def snake_case ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ :Optional[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , ) lowerCAmelCase__ :str = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , ) torch.manual_seed(0 ) lowerCAmelCase__ :str = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase__ :str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , ) lowerCAmelCase__ :int = CLIPTextModel(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase ) lowerCAmelCase__ :Any = CLIPTextModelWithProjection(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase ) lowerCAmelCase__ :str = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' lowerCAmelCase__ :Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith('mps' ): lowerCAmelCase__ 
:Optional[int] = torch.manual_seed(__UpperCAmelCase ) else: lowerCAmelCase__ :Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :int = self.get_dummy_components() lowerCAmelCase__ :List[str] = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :int = sd_pipe(**__UpperCAmelCase ).images lowerCAmelCase__ :int = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase__ :List[str] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def snake_case ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.get_dummy_components() lowerCAmelCase__ :str = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) # forward without prompt embeds lowerCAmelCase__ :int = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = 3 * ['this is a negative prompt'] lowerCAmelCase__ :Tuple = negative_prompt lowerCAmelCase__ :str = 3 * [inputs['prompt']] lowerCAmelCase__ :Optional[Any] = sd_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds lowerCAmelCase__ :Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = 3 * ['this is a negative prompt'] lowerCAmelCase__ :str = 3 * [inputs.pop('prompt' )] ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) :List[str] = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase ) lowerCAmelCase__ :str = sd_pipe( **__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , ) lowerCAmelCase__ :Optional[Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ): '''simple docstring''' lowerCAmelCase__ :Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Dict = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) ) 
lowerCAmelCase__ :Optional[int] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) lowerCAmelCase__ :int = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = self.get_inputs(__UpperCAmelCase ) lowerCAmelCase__ :int = pipe(**__UpperCAmelCase ).images lowerCAmelCase__ :Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase__ :List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
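# An illustrative sketch of the img2img flow the tests above cover, written against the
# class name used in current diffusers releases (StableDiffusionXLImg2ImgPipeline); the
# checkpoint and input path are assumptions, and real weights plus a GPU are required.
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("input.png")  # placeholder path
result = pipe(prompt="A painting of a squirrel eating a burger", image=init_image, strength=0.75)
result.images[0].save("out.png")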
293
1
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def snake_case ( *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' pass def __A (_SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" lowerCAmelCase__ :str = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" lowerCAmelCase__ :Optional[Any] = np.array(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Tuple = npimg.shape return {"hash": hashimage(_SCREAMING_SNAKE_CASE ), "shape": shape} @is_pipeline_test @require_vision @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __magic_name__ :Dict = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __magic_name__ :Optional[Any] = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' pass @require_tf @unittest.skip('Image segmentation not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass @slow @require_torch def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = pipeline('mask-generation' , model='facebook/sam-vit-huge' ) lowerCAmelCase__ :List[Any] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=2_5_6 ) # Shortening by hashing lowerCAmelCase__ :Any = [] for i, o in enumerate(outputs['masks'] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.04_44}, {'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_21}, {'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_67}, {'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_32}, {'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.00_53}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.99_67}, {'mask': {'hash': '453c7844bd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_93}, {'mask': {'hash': '3d44f2926d', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.99_09}, {'mask': {'hash': '64033ddc3f', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.98_79}, {'mask': {'hash': '801064ff79', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.98_34}, {'mask': {'hash': '6172f276ef', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.97_16}, {'mask': {'hash': 'b49e60e084', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.96_12}, {'mask': {'hash': 'a811e775fd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_99}, {'mask': {'hash': 
'a6a8ebcf4b', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_52}, {'mask': {'hash': '9d8257e080', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_32}, {'mask': {'hash': '32de6454a8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_16}, {'mask': {'hash': 'af3d4af2c8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_99}, {'mask': {'hash': '3c6db475fb', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_83}, {'mask': {'hash': 'c290813fb9', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_64}, {'mask': {'hash': 'b6f0b8f606', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_43}, {'mask': {'hash': '92ce16bfdf', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_43}, {'mask': {'hash': 'c749b25868', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_08}, {'mask': {'hash': 'efb6cab859', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.93_35}, {'mask': {'hash': '1ff2eafb30', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.93_26}, {'mask': {'hash': '788b798e24', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.92_62}, {'mask': {'hash': 'abea804f0e', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.89_99}, {'mask': {'hash': '7b9e8ddb73', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.89_86}, {'mask': {'hash': 'cd24047c8a', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.89_84}, {'mask': {'hash': '6943e6bcbd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.88_73}, {'mask': {'hash': 'b5f47c9191', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.88_71} ] , ) # fmt: on @require_torch @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = 'facebook/sam-vit-huge' lowerCAmelCase__ :str = pipeline('mask-generation' , model=__UpperCAmelCase ) lowerCAmelCase__ :List[str] = image_segmenter( 'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=2_5_6 ) # Shortening by hashing lowerCAmelCase__ :Optional[Any] = [] for i, o in enumerate(outputs['masks'] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.04_44}, {'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.02_10}, {'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_67}, {'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_32}, {'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.00_53}, ] , )
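# A hedged sketch of the mask-generation pipeline exercised above; the SAM checkpoint
# is large, so this is illustrative rather than something to run in CI.
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
print(len(outputs["masks"]), outputs["scores"][:3])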
293
"""simple docstring""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" lowerCAmelCase__ :str = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(F"Building PyTorch model from configuration: {config}" ) lowerCAmelCase__ :int = BertForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
293
1
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :List[Any] = ["""image_processor""", """tokenizer"""] __magic_name__ :Any = """ChineseCLIPImageProcessor""" __magic_name__ :Any = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __UpperCAmelCase , ) lowerCAmelCase__ :Tuple = kwargs.pop('feature_extractor' ) lowerCAmelCase__ :int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :str = self.image_processor def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: lowerCAmelCase__ :Union[str, Any] = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: lowerCAmelCase__ :Any = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: lowerCAmelCase__ :Optional[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = self.tokenizer.model_input_names lowerCAmelCase__ :Optional[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def snake_case ( self ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , ) return self.image_processor_class
293
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[str] = XGLMTokenizer __magic_name__ :Any = XGLMTokenizerFast __magic_name__ :Dict = True __magic_name__ :Union[str, Any] = True def snake_case ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ :int = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = '<pad>' lowerCAmelCase__ :int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase__ :int = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase__ :Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) lowerCAmelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def snake_case ( self ): '''simple docstring''' return XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) def snake_case ( self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__UpperCAmelCase , f.name ) lowerCAmelCase__ :Dict = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = pickle.dumps(__UpperCAmelCase ) pickle.loads(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ :Optional[Any] = self.get_tokenizer() lowerCAmelCase__ :List[str] = self.get_rust_tokenizer() lowerCAmelCase__ :Optional[Any] = 'I was born in 92000, and this is falsé.' lowerCAmelCase__ :Dict = tokenizer.tokenize(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = rust_tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = self.get_rust_tokenizer() lowerCAmelCase__ :Dict = tokenizer.encode(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = rust_tokenizer.encode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = 'Hello World!' lowerCAmelCase__ :Tuple = [2, 3_1_2_2_7, 4_4_4_7, 3_5] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowerCAmelCase__ :List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5] # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = { 'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='facebook/xglm-564M' , padding=__UpperCAmelCase , )
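# A small, hedged round-trip sketch of the behaviour the tests above check; it
# downloads the real sentencepiece model for facebook/xglm-564M.
from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tok.encode("Hello World!")
print(ids)              # the slow test above expects [2, 31227, 4447, 35]
print(tok.decode(ids))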
"""simple docstring""" import math import random def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->float: """simple docstring""" if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __A = 0.02 def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float: """simple docstring""" lowerCAmelCase__ :List[Any] = float(2 * (random.randint(1 , 100 )) - 1 ) for _ in range(_SCREAMING_SNAKE_CASE ): # Forward propagation lowerCAmelCase__ :Tuple = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? lowerCAmelCase__ :Optional[Any] = (expected / 100) - layer_a # Error delta lowerCAmelCase__ :Tuple = layer_1_error * sigmoid_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __A = int(input("""Expected value: """)) __A = int(input("""Number of propagations: """)) print(forward_propagation(expected, number_propagations))
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __A = Lock() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]: """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowerCAmelCase__ :Any = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowerCAmelCase__ :Tuple = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(_SCREAMING_SNAKE_CASE ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowerCAmelCase__ :Optional[int] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowerCAmelCase__ :Optional[int] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # after all swaps are performed, send the values back to main result_pipe[1].send(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :str = [] lowerCAmelCase__ :Optional[Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowerCAmelCase__ :List[str] = Pipe() lowerCAmelCase__ :List[Any] = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowerCAmelCase__ :Dict = temp_rs lowerCAmelCase__ :Optional[Any] = temp_rr for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ): lowerCAmelCase__ :Union[str, Any] = Pipe() lowerCAmelCase__ :List[str] = Pipe() process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowerCAmelCase__ :Union[str, Any] = temp_rs lowerCAmelCase__ :Any = temp_rr process_array_.append( Process( target=_SCREAMING_SNAKE_CASE , args=( len(_SCREAMING_SNAKE_CASE ) - 1, arr[len(_SCREAMING_SNAKE_CASE ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ): lowerCAmelCase__ :str = result_pipe[p][0].recv() process_array_[p].join() return arr def __A () ->List[Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = odd_even_transposition(_SCREAMING_SNAKE_CASE ) print('Sorted List\n' ) print(*_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
"""simple docstring""" from ..utils import DummyObject, requires_backends class _lowerCAmelCase ( metaclass=a ): """simple docstring""" __magic_name__ :Dict = ["""torch""", """transformers""", """onnx"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _lowerCAmelCase ( metaclass=a ): """simple docstring""" __magic_name__ :List[Any] = ["""torch""", """transformers""", """onnx"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _lowerCAmelCase ( metaclass=a ): """simple docstring""" __magic_name__ :Tuple = ["""torch""", """transformers""", """onnx"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _lowerCAmelCase ( metaclass=a ): """simple docstring""" __magic_name__ :Tuple = ["""torch""", """transformers""", """onnx"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _lowerCAmelCase ( metaclass=a ): """simple docstring""" __magic_name__ :Tuple = ["""torch""", """transformers""", """onnx"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _lowerCAmelCase ( metaclass=a ): """simple docstring""" __magic_name__ :int = ["""torch""", """transformers""", """onnx"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def snake_case ( cls 
, *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] )
"""simple docstring""" from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(a ) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , **__UpperCAmelCase ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) requires_backends(self , 'vision' ) requires_backends(self , 'torch' ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) self.check_model_type(__UpperCAmelCase ) def snake_case ( self , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = {} lowerCAmelCase__ :Tuple = {} lowerCAmelCase__ :Any = {} # preprocess args if "points_per_batch" in kwargs: lowerCAmelCase__ :Dict = kwargs['points_per_batch'] if "points_per_crop" in kwargs: lowerCAmelCase__ :Union[str, Any] = kwargs['points_per_crop'] if "crops_n_layers" in kwargs: lowerCAmelCase__ :Any = kwargs['crops_n_layers'] if "crop_overlap_ratio" in kwargs: lowerCAmelCase__ :Any = kwargs['crop_overlap_ratio'] if "crop_n_points_downscale_factor" in kwargs: lowerCAmelCase__ :Dict = kwargs['crop_n_points_downscale_factor'] # postprocess args if "pred_iou_thresh" in kwargs: lowerCAmelCase__ :Tuple = kwargs['pred_iou_thresh'] if "stability_score_offset" in kwargs: lowerCAmelCase__ :Optional[int] = kwargs['stability_score_offset'] if "mask_threshold" in kwargs: lowerCAmelCase__ :List[Any] = kwargs['mask_threshold'] if "stability_score_thresh" in kwargs: lowerCAmelCase__ :Optional[Any] = kwargs['stability_score_thresh'] if "crops_nms_thresh" in kwargs: lowerCAmelCase__ :int = kwargs['crops_nms_thresh'] if "output_rle_mask" in kwargs: lowerCAmelCase__ :Union[str, Any] = kwargs['output_rle_mask'] if "output_bboxes_mask" in kwargs: lowerCAmelCase__ :Optional[Any] = kwargs['output_bboxes_mask'] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(__UpperCAmelCase , *__UpperCAmelCase , num_workers=__UpperCAmelCase , batch_size=__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=6_4 , __UpperCAmelCase = 0 , __UpperCAmelCase = 5_1_2 / 1_5_0_0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 1 , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = load_image(__UpperCAmelCase ) lowerCAmelCase__ :int = self.image_processor.size['longest_edge'] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = self.image_processor.generate_crop_boxes( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = self.image_processor(images=__UpperCAmelCase , return_tensors='pt' ) with self.device_placement(): if self.framework == "pt": lowerCAmelCase__ :Optional[int] = self.get_inference_context() with inference_context(): lowerCAmelCase__ :Any = self._ensure_tensor_on_device(__UpperCAmelCase , device=self.device ) lowerCAmelCase__ :Tuple = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) ) lowerCAmelCase__ :Optional[int] = image_embeddings lowerCAmelCase__ 
:List[Any] = grid_points.shape[1] lowerCAmelCase__ :Union[str, Any] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( 'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. ' 'To return all points at once, set points_per_batch to None' ) for i in range(0 , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Optional[Any] = grid_points[:, i : i + points_per_batch, :, :] lowerCAmelCase__ :List[str] = input_labels[:, i : i + points_per_batch] lowerCAmelCase__ :List[Any] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0.88 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , ): '''simple docstring''' lowerCAmelCase__ :Any = model_inputs.pop('input_boxes' ) lowerCAmelCase__ :Optional[int] = model_inputs.pop('is_last' ) lowerCAmelCase__ :Dict = model_inputs.pop('original_sizes' ).tolist() lowerCAmelCase__ :Dict = model_inputs.pop('reshaped_input_sizes' ).tolist() lowerCAmelCase__ :Optional[int] = self.model(**__UpperCAmelCase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCAmelCase__ :int = model_outputs['pred_masks'] lowerCAmelCase__ :Optional[Any] = self.image_processor.post_process_masks( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , binarize=__UpperCAmelCase ) lowerCAmelCase__ :Any = model_outputs['iou_scores'] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.7 , ): '''simple docstring''' lowerCAmelCase__ :Dict = [] lowerCAmelCase__ :Optional[Any] = [] lowerCAmelCase__ :int = [] for model_output in model_outputs: all_scores.append(model_output.pop('iou_scores' ) ) all_masks.extend(model_output.pop('masks' ) ) all_boxes.append(model_output.pop('boxes' ) ) lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase ) lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self.image_processor.post_process_for_mask_generation( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Tuple = defaultdict(__UpperCAmelCase ) for output in model_outputs: for k, v in output.items(): extra[k].append(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = {} if output_rle_mask: lowerCAmelCase__ :str = rle_mask if output_bboxes_mask: lowerCAmelCase__ :Optional[int] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
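# Hedged usage sketch for the mask-generation pipeline above; the checkpoint id
# and point count are illustrative, and running this downloads a SAM model:
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base")
#   outputs = generator("document.png", points_per_batch=64)
#   masks, scores = outputs["masks"], outputs["scores"]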
"""simple docstring""" # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class _lowerCAmelCase : """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = False ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = scheduler lowerCAmelCase__ :Optional[Any] = optimizers if isinstance(__UpperCAmelCase , (list, tuple) ) else [optimizers] lowerCAmelCase__ :Union[str, Any] = split_batches lowerCAmelCase__ :str = step_with_optimizer lowerCAmelCase__ :Tuple = GradientState() def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step lowerCAmelCase__ :Optional[int] = AcceleratorState().num_processes for _ in range(__UpperCAmelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , 'total_steps' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase ) else: self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return self.scheduler.get_last_lr() def snake_case ( self ): '''simple docstring''' return self.scheduler.state_dict() def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' self.scheduler.load_state_dict(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return self.scheduler.get_lr() def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.scheduler.print_lr(*__UpperCAmelCase , **__UpperCAmelCase )
"""simple docstring""" from __future__ import annotations __A = 1.6_021e-19 # units = C def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->tuple[str, float]: """simple docstring""" if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif conductivity < 0: raise ValueError('Conductivity cannot be negative' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative' ) elif mobility < 0: raise ValueError('mobility cannot be negative' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from math import loga def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" if a < 0: raise ValueError('Input value must be a positive integer' ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('Input value must be a \'int\' type' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=1_8 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , ): '''simple docstring''' lowerCAmelCase__ :Dict = size if size is not None else {'height': 1_8, 'width': 1_8} lowerCAmelCase__ :Tuple = parent lowerCAmelCase__ :List[Any] = batch_size lowerCAmelCase__ :List[Any] = num_channels lowerCAmelCase__ :Any = image_size lowerCAmelCase__ :int = min_resolution lowerCAmelCase__ :int = max_resolution lowerCAmelCase__ :Dict = do_resize lowerCAmelCase__ :str = size lowerCAmelCase__ :Any = apply_ocr def snake_case ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :str = LayoutLMvaImageProcessor if is_pytesseract_available() else None def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = LayoutLMvaImageProcessingTester(self ) @property def snake_case ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'apply_ocr' ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} ) lowerCAmelCase__ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} ) def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __UpperCAmelCase ) self.assertIsInstance(encoding.boxes , __UpperCAmelCase ) # Test batched lowerCAmelCase__ :Any = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCAmelCase__ :Optional[Any] = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowerCAmelCase__ :Any = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCAmelCase__ :Tuple = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) lowerCAmelCase__ :int = Image.open(ds[0]['file'] ).convert('RGB' ) lowerCAmelCase__ :Optional[int] = image_processing(__UpperCAmelCase , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCAmelCase__ :Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 
'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 lowerCAmelCase__ :List[str] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 
6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __UpperCAmelCase ) self.assertListEqual(encoding.boxes , __UpperCAmelCase ) # with apply_OCR = False lowerCAmelCase__ :int = LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = image_processing(__UpperCAmelCase , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
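# Hedged usage sketch for the image processor exercised above; in current
# Transformers releases the class is LayoutLMv3ImageProcessor, the image path
# is illustrative, and OCR requires pytesseract to be installed:
#
#   from PIL import Image
#   from transformers import LayoutLMv3ImageProcessor
#
#   processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   print(encoding.pixel_values.shape)  # (1, 3, 224, 224)
#   print(encoding.words, encoding.boxes)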
"""simple docstring""" import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor __A = logging.get_logger(__name__) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' warnings.warn( 'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use FlavaImageProcessor instead.' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""ReformerTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""ReformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ReformerAttention""", """ReformerForMaskedLM""", """ReformerForQuestionAnswering""", """ReformerForSequenceClassification""", """ReformerLayer""", """ReformerModel""", """ReformerModelWithLMHead""", """ReformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Optional[Any] = ConsistencyModelPipeline __magic_name__ :Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __magic_name__ :Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt __magic_name__ :Optional[int] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) @property def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet' , ) return unet @property def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , ) return unet def snake_case ( self , __UpperCAmelCase=False ): '''simple docstring''' if class_cond: lowerCAmelCase__ :Dict = self.dummy_cond_unet else: lowerCAmelCase__ :List[str] = self.dummy_uncond_unet # Default to CM multistep sampler lowerCAmelCase__ :Any = CMStochasticIterativeScheduler( num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , ) lowerCAmelCase__ :List[Any] = { 'unet': unet, 'scheduler': scheduler, } return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('mps' ): lowerCAmelCase__ :Dict = torch.manual_seed(__UpperCAmelCase ) else: lowerCAmelCase__ :Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = { 'batch_size': 1, 'num_inference_steps': None, 'timesteps': [2_2, 0], 'generator': generator, 'output_type': 'np', } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Dict = self.get_dummy_components() lowerCAmelCase__ :Optional[int] = ConsistencyModelPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase__ :Tuple = image[0, -3:, -3:, -1] lowerCAmelCase__ :Tuple = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :List[Any] = self.get_dummy_components(class_cond=__UpperCAmelCase ) lowerCAmelCase__ :int = ConsistencyModelPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Tuple = pipe.to(__UpperCAmelCase ) 
pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :str = 0 lowerCAmelCase__ :Union[str, Any] = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase__ :Any = image[0, -3:, -3:, -1] lowerCAmelCase__ :Dict = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Union[str, Any] = self.get_dummy_components() lowerCAmelCase__ :Optional[int] = ConsistencyModelPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :List[str] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = 1 lowerCAmelCase__ :Optional[Any] = None lowerCAmelCase__ :Any = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase__ :Tuple = image[0, -3:, -3:, -1] lowerCAmelCase__ :List[Any] = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Optional[Any] = self.get_dummy_components(class_cond=__UpperCAmelCase ) lowerCAmelCase__ :int = ConsistencyModelPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Any = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = 1 lowerCAmelCase__ :Dict = None lowerCAmelCase__ :int = 0 lowerCAmelCase__ :List[Any] = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase__ :int = image[0, -3:, -3:, -1] lowerCAmelCase__ :Union[str, Any] = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , __UpperCAmelCase=0 , __UpperCAmelCase=False , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=(1, 3, 6_4, 6_4) ): '''simple docstring''' lowerCAmelCase__ :str = torch.manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = { 'num_inference_steps': None, 'timesteps': [2_2, 0], 'class_labels': 0, 'generator': generator, 'output_type': 'np', } if get_fixed_latents: lowerCAmelCase__ :Optional[int] = self.get_fixed_latents(seed=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase , shape=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = latents return inputs def snake_case ( self , __UpperCAmelCase=0 , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=(1, 3, 6_4, 6_4) ): '''simple docstring''' if type(__UpperCAmelCase ) == str: lowerCAmelCase__ :Tuple = torch.device(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = randn_tensor(__UpperCAmelCase , 
generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase ) return latents def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ :List[str] = CMStochasticIterativeScheduler( num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , ) lowerCAmelCase__ :List[Any] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pipe.to(torch_device=__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :int = self.get_inputs() lowerCAmelCase__ :int = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase__ :Any = image[0, -3:, -3:, -1] lowerCAmelCase__ :Tuple = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ :str = CMStochasticIterativeScheduler( num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , ) lowerCAmelCase__ :Optional[Any] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pipe.to(torch_device=__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :List[str] = self.get_inputs() lowerCAmelCase__ :Tuple = 1 lowerCAmelCase__ :int = None lowerCAmelCase__ :Optional[int] = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase__ :Tuple = image[0, -3:, -3:, -1] lowerCAmelCase__ :Union[str, Any] = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ :Dict = CMStochasticIterativeScheduler( num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , ) lowerCAmelCase__ :Tuple = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pipe.to(torch_device=__UpperCAmelCase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Dict = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ): lowerCAmelCase__ :List[str] = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase__ :Any = image[0, -3:, -3:, -1] lowerCAmelCase__ :int = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ :Dict = CMStochasticIterativeScheduler( num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , ) lowerCAmelCase__ :Optional[Any] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pipe.to(torch_device=__UpperCAmelCase , 
torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = 1 lowerCAmelCase__ :Tuple = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ): lowerCAmelCase__ :str = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase__ :Optional[Any] = image[0, -3:, -3:, -1] lowerCAmelCase__ :List[Any] = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
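# Hedged usage sketch mirroring the slow tests above (same hub repo and
# subfolder; single-step sampling is the consistency-model fast path). Running
# this downloads the checkpoint and needs a CUDA device:
#
#   from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel
#
#   unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#   pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
#   image = pipe(num_inference_steps=1, class_labels=0).images[0]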
"""simple docstring""" import math def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be an integer" raise TypeError(_SCREAMING_SNAKE_CASE ) if number < 1: lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be > 0" raise ValueError(_SCREAMING_SNAKE_CASE ) elif number == 1: return 3 elif number == 2: return 5 else: lowerCAmelCase__ :Union[str, Any] = int(math.log(number // 3 , 2 ) ) + 2 lowerCAmelCase__ :Optional[Any] = [3, 5] lowerCAmelCase__ :Optional[Any] = 2 lowerCAmelCase__ :List[str] = 3 for block in range(1 , _SCREAMING_SNAKE_CASE ): for _ in range(_SCREAMING_SNAKE_CASE ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): __A = 0 try: __A = proth(number) except ValueError: print(F'''ValueError: there is no {number}th Proth number''') continue print(F'''The {number}th Proth number: {value}''')
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE = 100 ) ->int: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = (n * (n + 1) // 2) ** 2 lowerCAmelCase__ :Optional[Any] = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __A = TypeVar("""KEY""") __A = TypeVar("""VAL""") @dataclass(frozen=a , slots=a ) class _lowerCAmelCase ( Generic[KEY, VAL] ): """simple docstring""" __magic_name__ :KEY __magic_name__ :VAL class _lowerCAmelCase ( _Item ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __bool__( self ): '''simple docstring''' return False __A = _DeletedItem() class _lowerCAmelCase ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.75 ): '''simple docstring''' lowerCAmelCase__ :List[str] = initial_block_size lowerCAmelCase__ :list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ :Tuple = capacity_factor lowerCAmelCase__ :str = 0 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return hash(__UpperCAmelCase ) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = self._buckets[ind] if not stored: lowerCAmelCase__ :Dict = _Item(__UpperCAmelCase , __UpperCAmelCase ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ :Optional[Any] = _Item(__UpperCAmelCase , __UpperCAmelCase ) return True else: return False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ :Optional[Any] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self._buckets lowerCAmelCase__ :Tuple = [None] * new_size lowerCAmelCase__ :List[Any] = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self._get_bucket_index(__UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ :Tuple = self._get_next_ind(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__UpperCAmelCase , __UpperCAmelCase ) def __delitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :int = self._buckets[ind] if item is None: raise KeyError(__UpperCAmelCase ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ :List[str] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :str = 
self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCAmelCase ) def __len__( self ): '''simple docstring''' return self._len def __iter__( self ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ' ,'.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return F"HashMap({val_string})"
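# Hedged usage sketch for the open-addressing map above; `HashMap` matches the
# name printed by its __repr__, and the class follows the MutableMapping
# protocol (keys/values are illustrative):
#
#   hm = HashMap()
#   hm["alpha"] = 1
#   hm["beta"] = 2
#   del hm["alpha"]
#   assert list(hm) == ["beta"] and hm["beta"] == 2 and len(hm) == 1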
"""simple docstring""" from __future__ import annotations def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) ->tuple[int, float, str]: """simple docstring""" lowerCAmelCase__ :List[str] = cipher_alphabet or [chr(_SCREAMING_SNAKE_CASE ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowerCAmelCase__ :List[Any] = { 'a': 0.0_8_4_9_7, 'b': 0.0_1_4_9_2, 'c': 0.0_2_2_0_2, 'd': 0.0_4_2_5_3, 'e': 0.1_1_1_6_2, 'f': 0.0_2_2_2_8, 'g': 0.0_2_0_1_5, 'h': 0.0_6_0_9_4, 'i': 0.0_7_5_4_6, 'j': 0.0_0_1_5_3, 'k': 0.0_1_2_9_2, 'l': 0.0_4_0_2_5, 'm': 0.0_2_4_0_6, 'n': 0.0_6_7_4_9, 'o': 0.0_7_5_0_7, 'p': 0.0_1_9_2_9, 'q': 0.0_0_0_9_5, 'r': 0.0_7_5_8_7, 's': 0.0_6_3_2_7, 't': 0.0_9_3_5_6, 'u': 0.0_2_7_5_8, 'v': 0.0_0_9_7_8, 'w': 0.0_2_5_6_0, 'x': 0.0_0_1_5_0, 'y': 0.0_1_9_9_4, 'z': 0.0_0_0_7_7, } else: # Custom frequencies dictionary lowerCAmelCase__ :str = frequencies_dict if not case_sensitive: lowerCAmelCase__ :str = ciphertext.lower() # Chi squared statistic values lowerCAmelCase__ :dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(_SCREAMING_SNAKE_CASE ) ): lowerCAmelCase__ :str = '' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowerCAmelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len( _SCREAMING_SNAKE_CASE ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowerCAmelCase__ :List[str] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowerCAmelCase__ :Tuple = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowerCAmelCase__ :Any = decrypted_with_shift.lower().count(_SCREAMING_SNAKE_CASE ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCAmelCase__ :Dict = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCAmelCase__ :Dict = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowerCAmelCase__ :Tuple = decrypted_with_shift.count(_SCREAMING_SNAKE_CASE ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCAmelCase__ :Tuple = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCAmelCase__ :List[Any] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowerCAmelCase__ :Any = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_SCREAMING_SNAKE_CASE ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowerCAmelCase__ :int = min( _SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE , ) # Get all the data from the most likely 
cipher (key, decoded message) ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) :List[str] = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
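# Hedged usage sketch for the chi-squared Caesar-cipher breaker above; the
# function name is an assumption, since the original identifier is mangled:
#
#   shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("crybd cdbsxq")
#   print(shift, plaintext)  # expected: 10 "short string"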
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __A = logging.getLogger(__name__) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" if os.path.exists(_SCREAMING_SNAKE_CASE ): if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ): os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) else: os.makedirs(_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :Dict = 2 if unlogit: lowerCAmelCase__ :List[str] = torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :str = p * torch.log(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[str] = 0 return -plogp.sum(dim=-1 ) def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" logger.info('lv, h >\t' + '\t'.join(F"{x + 1}" for x in range(len(_SCREAMING_SNAKE_CASE ) ) ) ) for row in range(len(_SCREAMING_SNAKE_CASE ) ): if tensor.dtype != torch.long: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:.5f}" for x in tensor[row].cpu().data ) ) else: logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:d}" for x in tensor[row].cpu().data ) ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ :Dict = model.config.num_hidden_layers, model.config.num_attention_heads lowerCAmelCase__ :Any = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) lowerCAmelCase__ :Tuple = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) if head_mask is None: lowerCAmelCase__ :Optional[int] = torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device ) head_mask.requires_grad_(requires_grad=_SCREAMING_SNAKE_CASE ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Any = 0.0 for step, inputs in enumerate(tqdm(_SCREAMING_SNAKE_CASE , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): lowerCAmelCase__ :str = tuple(t.to(args.device ) for t in inputs ) ((lowerCAmelCase__) , ) :Dict = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCAmelCase__ :str = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in 
enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = entropy(attn.detach() , _SCREAMING_SNAKE_CASE ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_SCREAMING_SNAKE_CASE ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: lowerCAmelCase__ :Union[str, Any] = 2 lowerCAmelCase__ :Tuple = torch.pow(torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: lowerCAmelCase__ :str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) logger.info('Head ranked by importance scores' ) lowerCAmelCase__ :List[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCAmelCase__ :List[Any] = torch.arange( head_importance.numel() , device=args.device ) lowerCAmelCase__ :int = head_ranks.view_as(_SCREAMING_SNAKE_CASE ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) return attn_entropy, head_importance, total_loss def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , _SCREAMING_SNAKE_CASE , original_score * args.masking_threshold ) lowerCAmelCase__ :Optional[int] = torch.ones_like(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCAmelCase__ :List[str] = original_score while current_score >= original_score * args.masking_threshold: lowerCAmelCase__ :List[str] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCAmelCase__ :str = float('Inf' ) lowerCAmelCase__ :List[str] = head_importance.view(-1 ).sort()[1] if len(_SCREAMING_SNAKE_CASE ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads lowerCAmelCase__ :int = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) lowerCAmelCase__ :Dict = new_head_mask.view(-1 ) lowerCAmelCase__ :Any = 0.0 lowerCAmelCase__ :Tuple = new_head_mask.view_as(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[int] = new_head_mask.clone().detach() print_ad_tensor(_SCREAMING_SNAKE_CASE ) # Compute metric and head importance again lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , _SCREAMING_SNAKE_CASE , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) 
logger.info('Final head mask' ) print_ad_tensor(_SCREAMING_SNAKE_CASE ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :Union[str, Any] = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time lowerCAmelCase__ :List[str] = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :List[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_SCREAMING_SNAKE_CASE ) ) } for k, v in heads_to_prune.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = [ v, ] assert sum(len(_SCREAMING_SNAKE_CASE ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ :int = datetime.now() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , actually_pruned=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ :int = 1 / loss lowerCAmelCase__ :Tuple = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(_SCREAMING_SNAKE_CASE , args.output_dir ) def __A () ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' 
, ) # Other parameters parser.add_argument( '--config_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=_SCREAMING_SNAKE_CASE , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=_SCREAMING_SNAKE_CASE , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=_SCREAMING_SNAKE_CASE , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=_SCREAMING_SNAKE_CASE , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=_SCREAMING_SNAKE_CASE , help='Batch size.' ) parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 ) parser.add_argument('--local_rank' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' 
) lowerCAmelCase__ :Any = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCAmelCase__ :List[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) lowerCAmelCase__ :Optional[int] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCAmelCase__ :Dict = torch.device('cuda' , args.local_rank ) lowerCAmelCase__ :Tuple = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCAmelCase__ :int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCAmelCase__ :Optional[Any] = nn.parallel.DistributedDataParallel( _SCREAMING_SNAKE_CASE , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_SCREAMING_SNAKE_CASE ) elif args.n_gpu > 1: lowerCAmelCase__ :Union[str, Any] = nn.DataParallel(_SCREAMING_SNAKE_CASE ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE ) # Prepare dataset lowerCAmelCase__ :Optional[int] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCAmelCase__ :Union[str, Any] = (torch.from_numpy(_SCREAMING_SNAKE_CASE ),) lowerCAmelCase__ :Optional[int] = TensorDataset(*_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = RandomSampler(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Dict = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCAmelCase__ :Optional[Any] = mask_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) prune_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
293
1
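The pruning script above scores attention heads by the gradient that flows into a differentiable head mask: the mask starts at all-ones, the loss is backpropagated, and |grad| per head serves as its importance. A minimal stand-alone sketch of that trick; the random "activations" below are stand-ins for GPT-2 outputs, not part of the script.

import torch

# Gradient-based head importance in miniature: differentiable all-ones mask,
# one backward pass, then read |grad| per head.
n_layers, n_heads = 2, 4
head_mask = torch.ones(n_layers, n_heads, requires_grad=True)

per_head_act = torch.randn(n_layers, 8, n_heads)            # fake per-head activations
loss = (per_head_act * head_mask.unsqueeze(1)).pow(2).mean()
loss.backward()

head_importance = head_mask.grad.abs()                      # shape (n_layers, n_heads)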
"""simple docstring""" from ..utils import DummyObject, requires_backends class _lowerCAmelCase ( metaclass=a ): """simple docstring""" __magic_name__ :Dict = ["""torch""", """torchsde"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , ['torch', 'torchsde'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'torchsde'] ) @classmethod def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['torch', 'torchsde'] )
293
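The dummy class above exists so that importing the package never fails when `torch`/`torchsde` are missing; instantiating or loading the class raises a clear error instead. A simplified stand-in for the `requires_backends` guard (not the library's actual implementation):

import importlib.util

# Check each backend with importlib and raise a readable ImportError
# listing whatever is missing, instead of failing at import time.
def requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = getattr(obj, '__name__', type(obj).__name__)
        raise ImportError(f'{name} requires the missing backends: {missing}')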
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = 1_0 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = [1, 2, 3, 4] lowerCAmelCase__ :Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = '' lowerCAmelCase__ , lowerCAmelCase__ :Any = process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) self.assertEqual(__UpperCAmelCase , [] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) lowerCAmelCase__ , lowerCAmelCase__ :str = process_story(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :List[str] = ['It was the best of times.'] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = torch.tensor([1, 2, 3, 4] ) lowerCAmelCase__ :List[str] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 0 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowerCAmelCase__ :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 2_3 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowerCAmelCase__ :Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 1 ).numpy() , expected.numpy() ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 1_0_1 lowerCAmelCase__ :str = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowerCAmelCase__ :Any = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 
1, 1]] ) lowerCAmelCase__ :List[Any] = compute_token_type_ids(__UpperCAmelCase , __UpperCAmelCase ) np.testing.assert_array_equal(__UpperCAmelCase , __UpperCAmelCase )
293
1
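The first three tests above pin down the behavior of `truncate_or_pad`: sequences shorter than `block_size` are right-padded with the pad token, longer ones are cut. A minimal implementation consistent with those expectations (a sketch, not the source file under test):

# truncate_or_pad as specified by the tests: pad on the right up to
# block_size, or truncate down to it.
def truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]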
"""simple docstring""" from torch import nn def __A (_SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"Unsupported activation function: {act_fn}" )
293
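A quick usage note: the helper above maps a config string to an `nn.Module` instance, which keeps model blocks string-configurable. For example (calling the anonymized name `__A` from the record):

from torch import nn

act = __A('gelu')          # string-to-module lookup for config-driven models
assert isinstance(act, nn.GELU)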
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 'hf-internal-testing/tiny-random-t5' lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Any = tokenizer('This is me' , return_tensors='pt' ) lowerCAmelCase__ :Dict = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) lowerCAmelCase__ :Optional[Any] = model.generate(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Any = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) lowerCAmelCase__ :Union[str, Any] = model_reloaded.generate(**__UpperCAmelCase ) self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = 'hf-internal-testing/tiny-random-t5' lowerCAmelCase__ :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__UpperCAmelCase ): model.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = model.reverse_bettertransformer() model.save_pretrained(__UpperCAmelCase )
293
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} __A = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } __A = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off __A = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Any = VOCAB_FILES_NAMES __magic_name__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __magic_name__ :List[str] = ["""input_ids""", """attention_mask"""] __magic_name__ :Optional[Any] = MBartTokenizer __magic_name__ :List[int] = [] __magic_name__ :List[int] = [] def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) lowerCAmelCase__ :int = vocab_file lowerCAmelCase__ :int = False if not self.vocab_file else True lowerCAmelCase__ :int = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) lowerCAmelCase__ :Tuple = { lang_code: self.convert_tokens_to_ids(__UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowerCAmelCase__ :Optional[int] = src_lang if src_lang is not None else 'en_XX' lowerCAmelCase__ :Tuple = self.convert_tokens_to_ids(self._src_lang ) lowerCAmelCase__ :Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def snake_case ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :int = [self.sep_token_id] lowerCAmelCase__ :Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) lowerCAmelCase__ :Optional[Any] = src_lang lowerCAmelCase__ :Dict = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ :Any = self.convert_tokens_to_ids(__UpperCAmelCase ) lowerCAmelCase__ :str = tgt_lang_id return inputs def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = "en_XX" , __UpperCAmelCase = None , __UpperCAmelCase = "ro_RO" , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :str = src_lang lowerCAmelCase__ :str = tgt_lang return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def snake_case ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.convert_tokens_to_ids(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = [] lowerCAmelCase__ :Any = [self.eos_token_id, self.cur_lang_code] lowerCAmelCase__ :Dict = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCAmelCase__ :Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCAmelCase__ :str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = self.convert_tokens_to_ids(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = [] lowerCAmelCase__ :Tuple = [self.eos_token_id, self.cur_lang_code] lowerCAmelCase__ 
:List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCAmelCase__ :Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCAmelCase__ :List[str] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(__UpperCAmelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory." ) return lowerCAmelCase__ :str = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
293
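The tokenizer record above builds `lang_code_to_id` from `FAIRSEQ_LANGUAGE_CODES` and switches special tokens per source/target language. A typical translation round-trip using the published Hub classes (a hedged sketch; the model id and generation flags follow the standard MBart pattern rather than this record):

from transformers import MBartForConditionalGeneration, MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained('facebook/mbart-large-en-ro', src_lang='en_XX')
model = MBartForConditionalGeneration.from_pretrained('facebook/mbart-large-en-ro')
batch = tok('UN Chief says there is no military solution in Syria', return_tensors='pt')
# Force the decoder to start with the target-language code.
out = model.generate(**batch, forced_bos_token_id=tok.lang_code_to_id['ro_RO'])
print(tok.batch_decode(out, skip_special_tokens=True))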
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] __A = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] __A = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): __A = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
1
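The `_LazyModule` at the bottom of the record above defers heavy imports until a name is first accessed. The same idea can be sketched with module-level `__getattr__` (PEP 562); `_IMPORT_MAP` and the `.modeling_stub` submodule below are hypothetical, purely for illustration:

import importlib

# Resolve exported names on first attribute access instead of at import time.
_IMPORT_MAP = {'DataClass': '.modeling_stub'}  # hypothetical submodule

def __getattr__(name):
    if name in _IMPORT_MAP:
        module = importlib.import_module(_IMPORT_MAP[name], __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')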
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return getitem, k def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return setitem, k, v def __A (_SCREAMING_SNAKE_CASE ) ->Tuple: """simple docstring""" return delitem, k def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) ->Union[str, Any]: """simple docstring""" try: return fun(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ), None except Exception as e: return None, e __A = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) __A = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] __A = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] __A = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] __A = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] __A = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( 'operations' , ( pytest.param(_add_items , id='add items' ), pytest.param(_overwrite_items , id='overwrite items' ), pytest.param(_delete_items , id='delete items' ), pytest.param(_access_absent_items , id='access absent items' ), pytest.param(_add_with_resize_up , id='add with resize up' ), pytest.param(_add_with_resize_down , id='add with resize down' ), ) , ) def __A (_SCREAMING_SNAKE_CASE ) ->Any: """simple docstring""" lowerCAmelCase__ :List[str] = HashMap(initial_block_size=4 ) lowerCAmelCase__ :Optional[Any] = {} for _, (fun, *args) in enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = _run_operation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ , lowerCAmelCase__ :List[str] = _run_operation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) assert my_res == py_res assert str(_SCREAMING_SNAKE_CASE ) == str(_SCREAMING_SNAKE_CASE ) assert set(_SCREAMING_SNAKE_CASE ) == set(_SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) assert set(my.items() ) == set(py.items() ) def __A () ->int: """simple docstring""" def is_public(_SCREAMING_SNAKE_CASE ) -> bool: return not name.startswith('_' ) lowerCAmelCase__ :List[Any] = {name for name in dir({} ) if is_public(_SCREAMING_SNAKE_CASE )} lowerCAmelCase__ :Tuple = {name for name in dir(HashMap() ) if is_public(_SCREAMING_SNAKE_CASE )} assert dict_public_names > hash_public_names
293
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowerCAmelCase ( yaml.SafeLoader ): """simple docstring""" def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase__ :str = [tuple(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else key for key in keys] lowerCAmelCase__ :Optional[int] = Counter(__UpperCAmelCase ) lowerCAmelCase__ :int = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().construct_mapping(__UpperCAmelCase , deep=__UpperCAmelCase ) self._check_no_duplicates_on_constructed_node(__UpperCAmelCase ) return mapping def __A (_SCREAMING_SNAKE_CASE ) ->Tuple[Optional[str], str]: """simple docstring""" lowerCAmelCase__ :Optional[Any] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase__ :Optional[int] = full_content[1:].index('---' ) + 1 lowerCAmelCase__ :Union[str, Any] = '\n'.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :List[str] = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(__UpperCAmelCase ) else: return cls() def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path.exists(): with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ :Optional[Any] = readme_file.read() else: lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :Union[str, Any] = self._to_readme(__UpperCAmelCase ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as readme_file: readme_file.write(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase = None ): '''simple docstring''' if readme_content is not None: lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = _split_yaml_from_readme(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content else: lowerCAmelCase__ :str = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = yaml.load(__UpperCAmelCase , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase__ :int = { (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return yaml.safe_dump( { (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=__UpperCAmelCase , allow_unicode=__UpperCAmelCase , encoding='utf-8' , ).decode('utf-8' ) __A = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], 
"""automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser __A = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") __A = ap.parse_args() __A = Path(args.readme_filepath) __A = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
293
1
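The splitter at the top of the record above cuts a README into its `---`-delimited YAML front matter and the remaining body. A self-contained usage sketch (same logic as the anonymized function, with its broken parameter name repaired; the README text is illustrative):

def split_yaml_from_readme(readme_content):
    # Front matter: first line is '---' and a closing '---' follows later.
    lines = readme_content.splitlines()
    if lines and lines[0] == '---' and '---' in lines[1:]:
        sep_idx = lines[1:].index('---') + 1
        return '\n'.join(lines[1:sep_idx]), '\n'.join(lines[sep_idx + 1:])
    return None, '\n'.join(lines)

yaml_block, body = split_yaml_from_readme('---\npretty_name: demo\n---\n# My dataset\n')
assert yaml_block == 'pretty_name: demo' and body == '# My dataset'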
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :str = (DDPMScheduler,) def snake_case ( self , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**__UpperCAmelCase ) return config def snake_case ( self ): '''simple docstring''' for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' self.check_over_configs(thresholding=__UpperCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , ) def snake_case ( self ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self.scheduler_classes[0] lowerCAmelCase__ :Optional[int] = self.get_scheduler_config() lowerCAmelCase__ :List[Any] = scheduler_class(**__UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.scheduler_classes[0] lowerCAmelCase__ :Optional[int] = self.get_scheduler_config() lowerCAmelCase__ :Optional[int] = scheduler_class(**__UpperCAmelCase ) lowerCAmelCase__ :str = len(__UpperCAmelCase ) lowerCAmelCase__ :str = self.dummy_model() lowerCAmelCase__ :Any = self.dummy_sample_deter lowerCAmelCase__ :int = torch.manual_seed(0 ) for t in reversed(range(__UpperCAmelCase ) ): # 1. predict noise residual lowerCAmelCase__ :Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase__ :Optional[Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase__ :Optional[int] = pred_prev_sample lowerCAmelCase__ :Any = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ :Optional[int] = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2 assert abs(result_mean.item() - 0.33_72 ) < 1E-3 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.scheduler_classes[0] lowerCAmelCase__ :Tuple = self.get_scheduler_config(prediction_type='v_prediction' ) lowerCAmelCase__ :Tuple = scheduler_class(**__UpperCAmelCase ) lowerCAmelCase__ :Dict = len(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = self.dummy_model() lowerCAmelCase__ :str = self.dummy_sample_deter lowerCAmelCase__ :Dict = torch.manual_seed(0 ) for t in reversed(range(__UpperCAmelCase ) ): # 1. predict noise residual lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase__ :List[Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase__ :List[str] = pred_prev_sample lowerCAmelCase__ :Any = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ :Tuple = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2 assert abs(result_mean.item() - 0.26_31 ) < 1E-3 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.scheduler_classes[0] lowerCAmelCase__ :Tuple = self.get_scheduler_config() lowerCAmelCase__ :Union[str, Any] = scheduler_class(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=__UpperCAmelCase ) lowerCAmelCase__ :str = scheduler.timesteps for i, timestep in enumerate(__UpperCAmelCase ): if i == len(__UpperCAmelCase ) - 1: lowerCAmelCase__ :str = -1 else: lowerCAmelCase__ :Any = timesteps[i + 1] lowerCAmelCase__ :Optional[int] = scheduler.previous_timestep(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = prev_t.item() self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.scheduler_classes[0] lowerCAmelCase__ :Optional[int] = self.get_scheduler_config() lowerCAmelCase__ :Dict = scheduler_class(**__UpperCAmelCase ) lowerCAmelCase__ :Dict = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(__UpperCAmelCase , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase__ :Any = self.get_scheduler_config() lowerCAmelCase__ :List[Any] = scheduler_class(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = [1_0_0, 8_7, 5_0, 1, 0] lowerCAmelCase__ :Optional[int] = len(__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' 
): scheduler.set_timesteps(num_inference_steps=__UpperCAmelCase , timesteps=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = self.scheduler_classes[0] lowerCAmelCase__ :List[Any] = self.get_scheduler_config() lowerCAmelCase__ :Any = scheduler_class(**__UpperCAmelCase ) lowerCAmelCase__ :List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( __UpperCAmelCase , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=__UpperCAmelCase )
293
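For orientation, the scheduler these tests exercise implements the DDPM ancestral step. A compact sketch of one epsilon-prediction step using the standard notation from the DDPM paper (not the scheduler's actual code; `fixed_small` matches the variance type in the test config above):

import torch

# One reverse-diffusion step: posterior mean plus "fixed_small" noise.
def ddpm_step(x_t, eps_pred, t, betas):
    alphas = 1.0 - betas
    alphas_cumprod = torch.cumprod(alphas, dim=0)
    a_bar_t = alphas_cumprod[t]
    # mean = (x_t - beta_t / sqrt(1 - a_bar_t) * eps) / sqrt(alpha_t)
    mean = (x_t - betas[t] / torch.sqrt(1 - a_bar_t) * eps_pred) / torch.sqrt(alphas[t])
    if t > 0:
        a_bar_prev = alphas_cumprod[t - 1]
        var = betas[t] * (1 - a_bar_prev) / (1 - a_bar_t)   # "fixed_small"
        return mean + torch.sqrt(var) * torch.randn_like(x_t)
    return mean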
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
293
1
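The XOR test above works because Python integers behave as if two's-complement: the sign bit of `a ^ b` is set exactly when `a` and `b` have opposite signs. Quick checks:

assert (-1 ^ 1) < 0       # opposite signs: XOR sign bit set
assert (3 ^ 7) >= 0       # same sign: sign bit clear
assert (-3 ^ -7) >= 0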
"""simple docstring""" from __future__ import annotations from collections import namedtuple def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple: """simple docstring""" lowerCAmelCase__ :Optional[int] = namedtuple('result' , 'name value' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('Only one argument must be 0' ) elif power < 0: raise ValueError( 'Power cannot be negative in any electrical/electronics system' ) elif voltage == 0: return result('voltage' , power / current ) elif current == 0: return result('current' , power / voltage ) elif power == 0: return result('power' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
293
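The function above solves P = V * I for whichever quantity is passed as 0. Usage against the record's anonymized name `__A`:

print(__A(voltage=0, current=2, power=50))      # result(name='voltage', value=25.0)
print(__A(voltage=230, current=0, power=460))   # result(name='current', value=2.0)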
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __A = logging.getLogger(__name__) class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase=-1 ): '''simple docstring''' lowerCAmelCase__ :Dict = label_idx def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Optional[Any] = mode.value lowerCAmelCase__ :List[str] = os.path.join(__UpperCAmelCase , F"{mode}.txt" ) lowerCAmelCase__ :List[str] = 1 lowerCAmelCase__ :Union[str, Any] = [] with open(__UpperCAmelCase , encoding='utf-8' ) as f: lowerCAmelCase__ :str = [] lowerCAmelCase__ :Dict = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 lowerCAmelCase__ :Tuple = [] lowerCAmelCase__ :List[str] = [] else: lowerCAmelCase__ :List[str] = line.split(' ' ) words.append(splits[0] ) if len(__UpperCAmelCase ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) return examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(__UpperCAmelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowerCAmelCase__ :Optional[Any] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(__UpperCAmelCase ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' 
, line.split()[0] ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: lowerCAmelCase__ :Any = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ :Union[str, Any] = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(label_idx=-2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: lowerCAmelCase__ :str = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ :Optional[Any] = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _lowerCAmelCase ( a ): """simple docstring""" def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = mode.value lowerCAmelCase__ :Union[str, Any] = os.path.join(__UpperCAmelCase , F"{mode}.txt" ) lowerCAmelCase__ :Any = 1 lowerCAmelCase__ :Optional[Any] = [] with open(__UpperCAmelCase , encoding='utf-8' ) as f: for sentence in parse_incr(__UpperCAmelCase ): lowerCAmelCase__ :Dict = [] lowerCAmelCase__ :Dict = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) ) guid_index += 1 return examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = 0 for sentence in parse_incr(__UpperCAmelCase ): lowerCAmelCase__ :Optional[int] = preds_list[example_id] lowerCAmelCase__ :Tuple = '' for token in sentence: out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(__UpperCAmelCase ) example_id += 1 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path: with open(__UpperCAmelCase , 'r' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
293
1
"""simple docstring""" from sklearn.metrics import fa_score import datasets __A = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ __A = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} """ __A = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def snake_case ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase="binary" , __UpperCAmelCase=None ): '''simple docstring''' lowerCAmelCase__ :int = fa_score( __UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase , pos_label=__UpperCAmelCase , average=__UpperCAmelCase , sample_weight=__UpperCAmelCase ) return {"f1": float(__UpperCAmelCase ) if score.size == 1 else score}
293
"""simple docstring""" from __future__ import annotations __A = tuple[int, int, int] __A = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase __A = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" # -------------------------- default selection -------------------------- # rotors -------------------------- __A = """EGZWVONAHDCLFQMSIPJBYUKXTR""" __A = """FOBHMDKEXQNRAULPGSJVTYICZW""" __A = """ZJXESIUQLHAVRMDOYGTNFWPBKC""" # reflector -------------------------- __A = { """A""": """N""", """N""": """A""", """B""": """O""", """O""": """B""", """C""": """P""", """P""": """C""", """D""": """Q""", """Q""": """D""", """E""": """R""", """R""": """E""", """F""": """S""", """S""": """F""", """G""": """T""", """T""": """G""", """H""": """U""", """U""": """H""", """I""": """V""", """V""": """I""", """J""": """W""", """W""": """J""", """K""": """X""", """X""": """K""", """L""": """Y""", """Y""": """L""", """M""": """Z""", """Z""": """M""", } # -------------------------- extra rotors -------------------------- __A = """RMDJXFUWGISLHVTCQNKYPBEZOA""" __A = """SGLCPQWZHKXAREONTFBVIYJUDM""" __A = """HVSICLTYKQUBXDWAJZOMFGPREN""" __A = """RZWQHFMVDBKICJLNTUXAGYPSOE""" __A = """LFKIJODBEGAMQPXVUHYSTCZRWN""" __A = """KOAEGVDHXPQZMLFTYWJNBRCIUS""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: """simple docstring""" if (unique_rotsel := len(set(_SCREAMING_SNAKE_CASE ) )) < 3: lowerCAmelCase__ :Union[str, Any] = F"Please use 3 unique rotors (not {unique_rotsel})" raise Exception(_SCREAMING_SNAKE_CASE ) # Checks if rotor positions are valid lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = rotpos if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Tuple = F"First rotor position is not within range of 1..26 ({rotorposa}" raise ValueError(_SCREAMING_SNAKE_CASE ) if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = F"Second rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_SCREAMING_SNAKE_CASE ) if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Union[str, Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_SCREAMING_SNAKE_CASE ) # Validates string and returns dict lowerCAmelCase__ :int = _plugboard(_SCREAMING_SNAKE_CASE ) return rotpos, rotsel, pbdict def __A (_SCREAMING_SNAKE_CASE ) ->dict[str, str]: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :str = F"Plugboard setting isn't type string ({type(_SCREAMING_SNAKE_CASE )})" raise TypeError(_SCREAMING_SNAKE_CASE ) elif len(_SCREAMING_SNAKE_CASE ) % 2 != 0: lowerCAmelCase__ :str = F"Odd number of symbols ({len(_SCREAMING_SNAKE_CASE )})" raise Exception(_SCREAMING_SNAKE_CASE ) elif pbstring == "": return {} pbstring.replace(' ' , '' ) # Checks if all characters are unique lowerCAmelCase__ :Any = set() for i in pbstring: if i not in abc: lowerCAmelCase__ :Any = F"'{i}' not in list of symbols" raise Exception(_SCREAMING_SNAKE_CASE ) elif i in tmppbl: lowerCAmelCase__ :Dict = F"Duplicate symbol ({i})" raise Exception(_SCREAMING_SNAKE_CASE ) else: tmppbl.add(_SCREAMING_SNAKE_CASE ) del tmppbl # Created the dictionary lowerCAmelCase__ :List[Any] = {} for j in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 , 2 ): lowerCAmelCase__ :Optional[int] = pbstring[j + 1] lowerCAmelCase__ :Union[str, Any] = pbstring[j] return pb def __A (_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , _SCREAMING_SNAKE_CASE = "" , ) ->str: """simple docstring""" lowerCAmelCase__ :Tuple = text.upper() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = _validator( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , plugb.upper() ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = rotor_position lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 lowerCAmelCase__ :Dict = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: lowerCAmelCase__ :Dict = plugboard[symbol] # rotor ra -------------------------- lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :str = rotora[index % len(_SCREAMING_SNAKE_CASE )] # rotor rb -------------------------- lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :int = rotora[index % len(_SCREAMING_SNAKE_CASE )] # rotor rc -------------------------- lowerCAmelCase__ :str = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa lowerCAmelCase__ :Optional[Any] = rotora[index % len(_SCREAMING_SNAKE_CASE )] # reflector -------------------------- # this is the reason you don't need another machine to decipher lowerCAmelCase__ :str = reflector[symbol] # 2nd rotors lowerCAmelCase__ :Tuple = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] lowerCAmelCase__ :Optional[int] = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] lowerCAmelCase__ :Any = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa] # 2nd plugboard if symbol in plugboard: lowerCAmelCase__ :Union[str, Any] = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :str = 0 rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :List[Any] = 0 rotorposa += 1 if rotorposa >= len(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ :Optional[Any] = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(_SCREAMING_SNAKE_CASE ) return "".join(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = """This is my Python script that emulates the Enigma machine from WWII.""" __A = (1, 1, 1) __A = """pictures""" __A = (rotora, rotora, rotora) __A = enigma(message, rotor_pos, rotor_sel, pb) print("""Encrypted message:""", en) print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
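# Round-trip sketch (settings below are illustrative, any valid ones work): the
# reflector and the plugboard are symmetric and the rotor stepping depends only
# on how many symbols have been processed, so encrypting the ciphertext with
# identical settings recovers the plaintext.
_settings = ((3, 5, 7), (rotor1, rotor5, rotor9), "AB")
_cipher = enigma("HELLOWORLD", *_settings)
assert enigma(_cipher, *_settings) == "HELLOWORLD"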
293
1
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :int = ["""image_processor""", """tokenizer"""] __magic_name__ :Optional[int] = """BlipImageProcessor""" __magic_name__ :Optional[int] = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = False super().__init__(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = self.image_processor def __call__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: lowerCAmelCase__ :Optional[int] = self.tokenizer lowerCAmelCase__ :Union[str, Any] = self.tokenizer( text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) return text_encoding # add pixel_values lowerCAmelCase__ :Union[str, Any] = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) if text is not None: lowerCAmelCase__ :Optional[int] = self.tokenizer( text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) else: lowerCAmelCase__ :Any = None if text_encoding is not None: encoding_image_processor.update(__UpperCAmelCase ) return encoding_image_processor def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.tokenizer.model_input_names lowerCAmelCase__ :List[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + 
image_processor_input_names ) )
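# A usage sketch for the processor above, paired with a BLIP captioning model;
# the checkpoint name and the local image path are assumptions for illustration.
if __name__ == "__main__":
    from PIL import Image
    from transformers import BlipForConditionalGeneration, BlipProcessor

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    image = Image.open("photo.jpg").convert("RGB")  # hypothetical local file
    inputs = processor(images=image, text="a photography of", return_tensors="pt")
    print(processor.batch_decode(model.generate(**inputs), skip_special_tokens=True)[0])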
293
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def __A (_SCREAMING_SNAKE_CASE ) ->Tuple: """simple docstring""" lowerCAmelCase__ :List[str] = np.max(_outputs , axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Any = """sigmoid""" __magic_name__ :Optional[Any] = """softmax""" __magic_name__ :Optional[Any] = """none""" @add_end_docstrings( a , r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. """ , ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Union[str, Any] = False __magic_name__ :Dict = ClassificationFunction.NONE def __init__( self , **__UpperCAmelCase ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = tokenizer_kwargs lowerCAmelCase__ :List[Any] = {} if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None: lowerCAmelCase__ :List[Any] = self.model.config.return_all_scores if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or top_k is None: lowerCAmelCase__ :int = top_k lowerCAmelCase__ :Dict = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , __UpperCAmelCase , ) if return_all_scores: lowerCAmelCase__ :List[Any] = None else: lowerCAmelCase__ :Union[str, Any] = 1 if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowerCAmelCase__ :List[Any] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
lowerCAmelCase__ :Optional[Any] = 'top_k' not in kwargs if isinstance(args[0] , __UpperCAmelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = self.framework if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.tokenizer(**__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1 and isinstance(inputs[0] , __UpperCAmelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.model(**__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowerCAmelCase__ :str = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowerCAmelCase__ :int = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None: lowerCAmelCase__ :Optional[Any] = self.model.config.function_to_apply else: lowerCAmelCase__ :Dict = ClassificationFunction.NONE lowerCAmelCase__ :int = model_outputs['logits'][0] lowerCAmelCase__ :Union[str, Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowerCAmelCase__ :Dict = sigmoid(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowerCAmelCase__ :int = softmax(__UpperCAmelCase ) elif function_to_apply == ClassificationFunction.NONE: lowerCAmelCase__ :Tuple = outputs else: raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowerCAmelCase__ :Any = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(__UpperCAmelCase ) ] if not _legacy: dict_scores.sort(key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase ) if top_k is not None: lowerCAmelCase__ :List[str] = dict_scores[:top_k] return dict_scores
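# A minimal usage sketch of the pipeline implemented above; the checkpoint name
# is an assumption (any sequence-classification model works).
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline(
        "text-classification", model="distilbert-base-uncased-finetuned-sst-2-english"
    )
    print(classifier("I love this movie!"))              # one top label (legacy default)
    print(classifier("I love this movie!", top_k=None))  # scores for every label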
293
1
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __A = """pt""" elif is_tf_available(): __A = """tf""" else: __A = """jax""" class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Dict = ByTaTokenizer __magic_name__ :Dict = False def snake_case ( self ): '''simple docstring''' super().setUp() lowerCAmelCase__ :Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def snake_case ( self ): '''simple docstring''' return ByTaTokenizer.from_pretrained('google/byt5-small' ) def snake_case ( self , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2_0 , __UpperCAmelCase=5 ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [] for i in range(len(__UpperCAmelCase ) ): try: lowerCAmelCase__ :Union[str, Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=__UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowerCAmelCase__ :Dict = list(filter(lambda __UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , __UpperCAmelCase ) ) lowerCAmelCase__ :List[str] = list(filter(lambda __UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__UpperCAmelCase ) , __UpperCAmelCase ) ) if max_length is not None and len(__UpperCAmelCase ) > max_length: lowerCAmelCase__ :Any = toks[:max_length] if min_length is not None and len(__UpperCAmelCase ) < min_length and len(__UpperCAmelCase ) > 0: while len(__UpperCAmelCase ) < min_length: lowerCAmelCase__ :Union[str, Any] = toks + toks # toks_str = [t[1] for t in toks] lowerCAmelCase__ :int = [t[0] for t in toks] # Ensure consistency lowerCAmelCase__ :str = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) if " " not in output_txt and len(__UpperCAmelCase ) > 1: lowerCAmelCase__ :List[Any] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__UpperCAmelCase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__UpperCAmelCase ) ) if with_prefix_space: lowerCAmelCase__ :List[str] = ' ' + output_txt lowerCAmelCase__ :Optional[int] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) return output_txt, output_ids def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = self.ta_base_tokenizer lowerCAmelCase__ :List[str] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) lowerCAmelCase__ :Optional[Any] = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.ta_base_tokenizer lowerCAmelCase__ :Optional[Any] = 'Unicode €.' 
lowerCAmelCase__ :Optional[int] = tokenizer(__UpperCAmelCase ) lowerCAmelCase__ :Any = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1] self.assertEqual(encoded['input_ids'] , __UpperCAmelCase ) # decoding lowerCAmelCase__ :Optional[int] = tokenizer.decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , 'Unicode €.</s>' ) lowerCAmelCase__ :str = tokenizer('e è é ê ë' ) lowerCAmelCase__ :Dict = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1] self.assertEqual(encoded['input_ids'] , __UpperCAmelCase ) # decoding lowerCAmelCase__ :Union[str, Any] = tokenizer.decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.ta_base_tokenizer lowerCAmelCase__ :str = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off lowerCAmelCase__ :Tuple = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0] # fmt: on lowerCAmelCase__ :Optional[int] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) if FRAMEWORK != "jax": lowerCAmelCase__ :Tuple = list(batch.input_ids.numpy()[0] ) else: lowerCAmelCase__ :Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual((2, 3_7) , batch.input_ids.shape ) self.assertEqual((2, 3_7) , batch.attention_mask.shape ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.ta_base_tokenizer lowerCAmelCase__ :Tuple = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , __UpperCAmelCase ) self.assertIn('attention_mask' , __UpperCAmelCase ) self.assertNotIn('decoder_input_ids' , __UpperCAmelCase ) self.assertNotIn('decoder_attention_mask' , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.ta_base_tokenizer lowerCAmelCase__ :Any = [ 'Summary of the text.', 'Another summary.', ] lowerCAmelCase__ :List[str] = tokenizer( text_target=__UpperCAmelCase , max_length=3_2 , padding='max_length' , truncation=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = self.ta_base_tokenizer lowerCAmelCase__ :Optional[Any] = ['A long paragraph for summarization. </s>'] lowerCAmelCase__ :List[str] = ['Summary of the text. 
</s>'] # fmt: off lowerCAmelCase__ :Union[str, Any] = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1] lowerCAmelCase__ :List[str] = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1] # fmt: on lowerCAmelCase__ :Tuple = tokenizer(__UpperCAmelCase , text_target=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , batch['input_ids'][0] ) self.assertEqual(__UpperCAmelCase , batch['labels'][0] ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test lowerCAmelCase__ :Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase__ :List[str] = tempfile.mkdtemp() lowerCAmelCase__ :Any = ' He is very happy, UNwant\u00E9d,running' lowerCAmelCase__ :Tuple = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) tokenizer.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Dict = tokenizer.__class__.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) shutil.rmtree(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase__ :List[Any] = tempfile.mkdtemp() lowerCAmelCase__ :str = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) lowerCAmelCase__ :Any = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) lowerCAmelCase__ :int = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) tokenizer.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :str = tokenizer.__class__.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) lowerCAmelCase__ :Union[str, Any] = tokenizer.__class__.from_pretrained(__UpperCAmelCase , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__UpperCAmelCase ) with open(os.path.join(__UpperCAmelCase , 'special_tokens_map.json' ) , 
encoding='utf-8' ) as json_file: lowerCAmelCase__ :List[str] = json.load(__UpperCAmelCase ) with open(os.path.join(__UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: lowerCAmelCase__ :str = json.load(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = [F"<extra_id_{i}>" for i in range(1_2_5 )] lowerCAmelCase__ :Any = added_tokens_extra_ids + [ 'an_additional_special_token' ] lowerCAmelCase__ :Dict = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(__UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(__UpperCAmelCase , __UpperCAmelCase ) with open(os.path.join(__UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(__UpperCAmelCase , __UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowerCAmelCase__ :Any = tokenizer_class.from_pretrained( __UpperCAmelCase , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowerCAmelCase__ :Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=__UpperCAmelCase )] lowerCAmelCase__ :Optional[int] = tokenizer_class.from_pretrained( __UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = tokenizer_class.from_pretrained(__UpperCAmelCase ) self.assertTrue(tokenizer.decode([2_5_5] ) == '' ) def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.get_tokenizers(fast=__UpperCAmelCase , do_lower_case=__UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): lowerCAmelCase__ :Optional[int] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] lowerCAmelCase__ :List[str] = tokenizer.convert_tokens_to_string(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = self.get_tokenizers() for tokenizer 
in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): lowerCAmelCase__ :int = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] lowerCAmelCase__ :Any = 0 lowerCAmelCase__ :List[Any] = tokenizer.convert_ids_to_tokens( __UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) for attr in attributes_list: setattr(__UpperCAmelCase , attr + '_id' , __UpperCAmelCase ) self.assertEqual(getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(getattr(__UpperCAmelCase , attr + '_id' ) , __UpperCAmelCase ) setattr(__UpperCAmelCase , attr + '_id' , __UpperCAmelCase ) self.assertEqual(getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(getattr(__UpperCAmelCase , attr + '_id' ) , __UpperCAmelCase ) setattr(__UpperCAmelCase , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(__UpperCAmelCase , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(__UpperCAmelCase , 'additional_special_tokens_ids' ) , [] ) setattr(__UpperCAmelCase , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(__UpperCAmelCase , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(__UpperCAmelCase , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
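# The expected ids in the tests above follow ByT5's byte-level scheme:
# token id = UTF-8 byte value + 3 (ids 0..2 are reserved for <pad>, </s> and
# <unk>), with a trailing 1 (</s>) appended on encode. A small sketch of that
# invariant (the helper name is introduced here for illustration):
def _expected_byt5_ids(text: str) -> list:
    return [byte + 3 for byte in text.encode("utf-8")] + [1]


assert _expected_byt5_ids("Unicode €.") == [
    88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1
]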
293
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float: """simple docstring""" if discount_rate < 0: raise ValueError('Discount rate cannot be negative' ) if not cash_flows: raise ValueError('Cash flows list cannot be empty' ) lowerCAmelCase__ :Union[str, Any] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_SCREAMING_SNAKE_CASE ) ) return round(_SCREAMING_SNAKE_CASE , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
293
1
"""simple docstring""" import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class _lowerCAmelCase : """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=6_4 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' lowerCAmelCase__ :Any = parent lowerCAmelCase__ :Tuple = batch_size lowerCAmelCase__ :List[Any] = seq_length lowerCAmelCase__ :str = is_training lowerCAmelCase__ :Optional[Any] = use_input_mask lowerCAmelCase__ :Tuple = use_token_type_ids lowerCAmelCase__ :int = use_labels lowerCAmelCase__ :Optional[int] = vocab_size lowerCAmelCase__ :List[str] = hidden_size lowerCAmelCase__ :Any = num_hidden_layers lowerCAmelCase__ :str = num_attention_heads lowerCAmelCase__ :Optional[Any] = intermediate_size lowerCAmelCase__ :Optional[int] = hidden_act lowerCAmelCase__ :Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ :Tuple = attention_probs_dropout_prob lowerCAmelCase__ :Optional[Any] = max_position_embeddings lowerCAmelCase__ :List[str] = type_vocab_size lowerCAmelCase__ :Optional[int] = type_sequence_label_size lowerCAmelCase__ :Optional[Any] = initializer_range lowerCAmelCase__ :Optional[Any] = num_labels lowerCAmelCase__ :Tuple = num_choices lowerCAmelCase__ :List[Any] = scope lowerCAmelCase__ :List[str] = vocab_size - 1 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ :Union[str, Any] = None if self.use_input_mask: lowerCAmelCase__ :int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ :List[str] = None if self.use_labels: lowerCAmelCase__ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ :Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def snake_case ( self ): '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , 
lowerCAmelCase__ , lowerCAmelCase__ :str = self.prepare_config_and_inputs() lowerCAmelCase__ :str = True return config, input_ids, input_mask, token_labels def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = GPTNeoXModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = True lowerCAmelCase__ :Optional[Any] = GPTNeoXModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = GPTNeoXForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :str = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = self.num_labels lowerCAmelCase__ :Tuple = GPTNeoXForQuestionAnswering(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self.num_labels lowerCAmelCase__ :int = GPTNeoXForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ :int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.num_labels lowerCAmelCase__ :str = GPTNeoXForTokenClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = True lowerCAmelCase__ :List[str] = GPTNeoXForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = 
outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase__ :List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase__ :Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase__ :Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase__ :Any = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCAmelCase__ :Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = output_from_no_past['hidden_states'][0] lowerCAmelCase__ :str = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['hidden_states'][0] # select random slice lowerCAmelCase__ :Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase__ :Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase__ :Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = config_and_inputs lowerCAmelCase__ :Tuple = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( a , a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[Any] = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) __magic_name__ :Optional[int] = (GPTNeoXForCausalLM,) if is_torch_available() else () __magic_name__ :Optional[Any] = ( { """feature-extraction""": GPTNeoXModel, """question-answering""": GPTNeoXForQuestionAnswering, """text-classification""": GPTNeoXForSequenceClassification, """text-generation""": GPTNeoXForCausalLM, """token-classification""": GPTNeoXForTokenClassification, """zero-shot""": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ :Union[str, Any] = False __magic_name__ :Dict = False __magic_name__ :Optional[int] = False __magic_name__ :int = False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = GPTNeoXModelTester(self ) lowerCAmelCase__ :Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=6_4 , num_attention_heads=8 ) def snake_case ( self ): '''simple docstring''' self.config_tester.run_common_tests() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = 
self.model_tester.prepare_config_and_inputs_for_decoder() lowerCAmelCase__ :Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @unittest.skip(reason='Feed forward chunking is not implemented' ) def snake_case ( self ): '''simple docstring''' pass @parameterized.expand([('linear',), ('dynamic',)] ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ :Dict = ids_tensor([1, 1_0] , config.vocab_size ) lowerCAmelCase__ :Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase__ :Union[str, Any] = GPTNeoXModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() lowerCAmelCase__ :int = original_model(__UpperCAmelCase ).last_hidden_state lowerCAmelCase__ :List[Any] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase__ :Optional[Any] = {'type': scaling_type, 'factor': 10.0} lowerCAmelCase__ :str = GPTNeoXModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() lowerCAmelCase__ :Any = scaled_model(__UpperCAmelCase ).last_hidden_state lowerCAmelCase__ :List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' ) for checkpointing in [True, False]: lowerCAmelCase__ :str = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = tokenizer('My favorite food is' , return_tensors='pt' ).to(__UpperCAmelCase ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 lowerCAmelCase__ :Union[str, Any] = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure' lowerCAmelCase__ :Any = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=2_0 ) lowerCAmelCase__ :int = tokenizer.batch_decode(__UpperCAmelCase )[0] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
293
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""OwlViTFeatureExtractor"""] __A = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["""GLPNFeatureExtractor"""] __A = ["""GLPNImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""", """GLPNForDepthEstimation""", """GLPNLayer""", """GLPNModel""", """GLPNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def snake_case ( *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' pass def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __A = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __magic_name__ :str = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = pipeline( 'document-question-answering' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :Dict = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) lowerCAmelCase__ :List[Any] = 'What is the placebo?' lowerCAmelCase__ :Dict = [ { 'image': load_image(__UpperCAmelCase ), 'question': question, }, { 'image': image, 'question': question, }, { 'image': image, 'question': question, 'word_boxes': word_boxes, }, ] return dqa_pipeline, examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = dqa_pipeline(__UpperCAmelCase , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )}, {'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' ) lowerCAmelCase__ :Union[str, Any] = INVOICE_URL lowerCAmelCase__ :Tuple = 'How many cats are there?' lowerCAmelCase__ :List[str] = [ {'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9}, {'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0}, ] lowerCAmelCase__ :Any = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) lowerCAmelCase__ :Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably lowerCAmelCase__ :List[Any] = './tests/fixtures/tests_samples/COCO/000000039769.png' lowerCAmelCase__ :List[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) # We can optionnally pass directly the words and bounding boxes lowerCAmelCase__ :Dict = './tests/fixtures/tests_samples/COCO/000000039769.png' lowerCAmelCase__ :List[str] = [] lowerCAmelCase__ :int = [] lowerCAmelCase__ :List[str] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 ) self.assertEqual(__UpperCAmelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , ) lowerCAmelCase__ :str = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' lowerCAmelCase__ :Tuple = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :Union[str, Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :Dict = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , ) lowerCAmelCase__ :List[Any] = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' 
lowerCAmelCase__ :Optional[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :int = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, {'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , ) lowerCAmelCase__ :List[str] = INVOICE_URL lowerCAmelCase__ :Any = 'What is the invoice number?' lowerCAmelCase__ :List[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) lowerCAmelCase__ :Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] ] * 2 , ) lowerCAmelCase__ :Dict = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) # This model should also work if `image` is set to None lowerCAmelCase__ :Tuple = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3}, ] , ) @slow @require_torch @require_pytesseract @require_vision def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , max_seq_len=5_0 , ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :List[Any] = 'What is the invoice number?' 
lowerCAmelCase__ :List[str] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) lowerCAmelCase__ :List[str] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] ] * 2 , ) lowerCAmelCase__ :Optional[Any] = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) ) # This model should also work if `image` is set to None lowerCAmelCase__ :List[str] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, {'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6}, ] , ) @slow @require_torch def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = pipeline( 'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , ) lowerCAmelCase__ :Dict = INVOICE_URL lowerCAmelCase__ :str = 'What is the invoice number?' lowerCAmelCase__ :Tuple = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'answer': 'us-001'}] ) @require_tf @unittest.skip('Document question answering not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __A = random.Random() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]: """simple docstring""" if rng is None: lowerCAmelCase__ :List[Any] = global_rng lowerCAmelCase__ :List[str] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=2_0_0_0 , __UpperCAmelCase=2_0_4_8 , __UpperCAmelCase=1_2_8 , __UpperCAmelCase=1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_4_1_0_0 , ): '''simple docstring''' lowerCAmelCase__ :Dict = parent lowerCAmelCase__ :Dict = batch_size lowerCAmelCase__ :Union[str, Any] = min_seq_length lowerCAmelCase__ :Optional[Any] = max_seq_length lowerCAmelCase__ :Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCAmelCase__ :Dict = spectrogram_length lowerCAmelCase__ :int = feature_size lowerCAmelCase__ :int = num_audio_channels lowerCAmelCase__ :int = hop_length lowerCAmelCase__ :Optional[Any] = chunk_length lowerCAmelCase__ :int = sampling_rate def snake_case ( self ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def snake_case ( self , __UpperCAmelCase=False , __UpperCAmelCase=False ): '''simple docstring''' def _flatten(__UpperCAmelCase ): return list(itertools.chain(*__UpperCAmelCase ) ) if equal_length: lowerCAmelCase__ :Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCAmelCase__ :List[str] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCAmelCase__ :List[Any] = [np.asarray(__UpperCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Tuple = TvltFeatureExtractor def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = TvltFeatureExtractionTester(self ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(__UpperCAmelCase , 'spectrogram_length' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'feature_size' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'num_audio_channels' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'hop_length' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'chunk_length' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'sampling_rate' ) ) def snake_case ( self ): '''simple 
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values

        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values

        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values

        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values

        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
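
# --- Hedged usage sketch (added for illustration; not part of the tests above). ---
# Feeding a raw 1-D waveform to TvltFeatureExtractor, mirroring how test_call and
# test_integration use it. Assumption: the silent dummy waveform is ours; only the
# extractor API and the 4-D output layout come from the tests themselves.
import numpy as np
from transformers import TvltFeatureExtractor

feature_extractor = TvltFeatureExtractor()
waveform = np.zeros(44100, dtype=np.float32)  # one second of dummy audio at 44.1 kHz
audio_values = feature_extractor([waveform], return_tensors="np", sampling_rate=44100).audio_values
print(audio_values.shape)  # 4-D: (batch, num_audio_channels, time frames, feature_size)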
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :Tuple = StableDiffusionXLImgaImgPipeline __magic_name__ :List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} __magic_name__ :Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} __magic_name__ :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __magic_name__ :str = IMAGE_TO_IMAGE_IMAGE_PARAMS __magic_name__ :Any = IMAGE_TO_IMAGE_IMAGE_PARAMS def snake_case ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ :Optional[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , ) lowerCAmelCase__ :str = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , ) torch.manual_seed(0 ) lowerCAmelCase__ :str = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase__ :str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , ) lowerCAmelCase__ :int = CLIPTextModel(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase ) lowerCAmelCase__ :Any = CLIPTextModelWithProjection(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase ) lowerCAmelCase__ :str = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' lowerCAmelCase__ :Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith('mps' ): lowerCAmelCase__ 
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
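
# --- Hedged usage sketch (added for illustration; not part of the tests above). ---
# End-to-end img2img call with the pipeline class under test. Assumptions: the public
# "stabilityai/stable-diffusion-xl-base-1.0" checkpoint, a CUDA device, and the
# placeholder paths "input.png"/"out.png" are ours; strength=0.75 mirrors the dummy
# inputs used by the fast tests.
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("input.png")  # placeholder input image
image = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=init_image,
    strength=0.75,  # how strongly to perturb the init image
    num_inference_steps=30,
).images[0]
image.save("out.png")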