Dataset schema (one record per row, five fields each):

  field                    type     length / range
  -----------------------  -------  ------------------
  code                     string   87 to 55.2k chars
  code_codestyle           int64    0 to 349
  style_context            string   135 to 49.1k chars
  style_context_codestyle  int64    0 to 349
  label                    int64    0 to 1

code:
'''simple docstring'''

def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
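A quick sanity check for heaps as reconstructed above; Heap's algorithm over n elements yields all n! permutations:

perms = heaps([1, 2, 3])
assert len(perms) == 6  # 3! = 6 permutations
assert set(perms) == {(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)}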
code_codestyle: 63

style_context:
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
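A minimal usage sketch for the config above, assuming an installed transformers (which exposes this class as transformers.TrOCRConfig); the attribute_map redirects the generic names to the decoder-specific ones:

from transformers import TrOCRConfig

config = TrOCRConfig()
print(config.d_model)              # 1024
print(config.hidden_size)          # 1024 -- resolved through attribute_map to d_model
print(config.num_attention_heads)  # 16   -- resolved to decoder_attention_heads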
style_context_codestyle: 63
label: 1

code:
'''simple docstring'''

import numpy


class TwoHiddenLayerNeuralNetwork:
    """simple docstring"""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    input_ = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=input_, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
code_codestyle: 63

style_context:
'''simple docstring'''

import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
style_context_codestyle: 63
label: 1

code:
'''simple docstring'''

import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
code_codestyle: 63

style_context:
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
style_context_codestyle: 63
label: 1

code:
'''simple docstring'''

from . import (
    albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart,
    barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird,
    bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom,
    bridgetower, byt5, camembert, canine, chinese_clip, clap, clip, clipseg, codegen,
    conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, data2vec,
    deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, deta,
    detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer,
    efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon,
    flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpt2, gpt_bigcode,
    gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3, gptj, gptsan_japanese, graphormer,
    groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox,
    layoutlm, layoutlmv2, layoutlmv3, layoutxlm, led, levit, lilt, llama, longformer,
    longt5, luke, lxmert, m2m_100, marian, markuplm, mask2former, maskformer, mbart,
    mbart50, mega, megatron_bert, megatron_gpt2, mgp_str, mluke, mobilebert,
    mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, mpnet, mra, mt5, musicgen, mvp,
    nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt,
    owlvit, pegasus, pegasus_x, perceiver, phobert, pix2struct, plbart, poolformer,
    prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta,
    roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d,
    speech_encoder_decoder, speech_to_text, speech_to_text_2, speecht5, splinter,
    squeezebert, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5,
    table_transformer, tapas, time_series_transformer, timesformer, timm_backbone,
    transfo_xl, trocr, tvlt, umt5, unispeech, unispeech_sat, upernet, videomae, vilt,
    vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid,
    vit_mae, vit_msn, vivit, wav2vec2, wav2vec2_conformer, wav2vec2_phoneme,
    wav2vec2_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta,
    xlm_roberta_xl, xlnet, xmod, yolos, yoso,
)
code_codestyle: 63

style_context:
'''simple docstring'''

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim: int,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
style_context_codestyle: 63
label: 1

code:
'''simple docstring'''

from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    """simple docstring"""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
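A quick check of patience_sort as reconstructed above; given the collection[:] assignment it also sorts the list in place:

data = [5, 1, 4, 2, 3]
assert patience_sort(data) == [1, 2, 3, 4, 5]
assert data == [1, 2, 3, 4, 5]  # sorted in place as well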
code_codestyle: 63

style_context:
'''simple docstring'''

import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""

    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)

        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
style_context_codestyle: 63
label: 1

code:
'''simple docstring'''

from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
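A worked check of the formula above: doubling n = 4 gives C(8, 4) = 8! / (4! * 4!) = 70, and n = 20 reproduces the well-known 20x20 lattice-path count:

assert solution(4) == 70             # C(8, 4)
assert solution(20) == 137846528820  # C(40, 20)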
code_codestyle: 63

style_context:
'''simple docstring'''

import math


class SelfOrganizingMap:
    """simple docstring"""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        # Compute the winning vector by Euclidean distance.
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        # Move the winning weight vector toward the sample.
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
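The update rule above moves the winning weight vector a fraction alpha toward the sample; a small worked example:

som = SelfOrganizingMap()
weights = som.update([[0.0, 0.0], [1.0, 1.0]], sample=[1, 1], j=0, alpha=0.5)
assert weights[0] == [0.5, 0.5]  # moved halfway toward the sample
assert weights[1] == [1.0, 1.0]  # the losing vector is untouched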
style_context_codestyle: 63
label: 1

code:
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
code_codestyle: 63

style_context:
'''simple docstring'''

import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
style_context_codestyle: 63
label: 1

code:
'''simple docstring'''

import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    """simple docstring"""

    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
code_codestyle: 63

style_context:
'''simple docstring'''

def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
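For example, the first three terms of the series as produced by harmonic_series above:

assert harmonic_series("3") == ["1", "1/2", "1/3"]
assert harmonic_series("") == []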
style_context_codestyle: 63
label: 1

code:
'''simple docstring'''

import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    """simple docstring"""

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    """simple docstring"""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    """simple docstring"""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
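A small usage sketch for the synthetic regression dataset above (seeded, so it is reproducible):

ds = RegressionDataset(a=2, b=3, length=4, seed=0)
assert len(ds) == 4
item = ds[0]
print(item["x"], item["y"])  # y is approximately 2 * x + 3, up to the 0.1-scale noise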
code_codestyle: 63

style_context:
'''simple docstring'''

import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
style_context_codestyle: 63
label: 1

code:
'''simple docstring'''

import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
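A quick numeric check of the two functions above (the sigmoid name matches its body; sigmoid_linear_unit is a reconstructed name for the SiLU-style second function):

print(sigmoid(np.array([0.0])))              # [0.5]
print(sigmoid_linear_unit(np.array([0.0])))  # [0.] -- x * sigmoid(x) vanishes at x = 0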
code_codestyle: 63

style_context:
'''simple docstring'''

import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
63
1
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin lowerCAmelCase_ : List[Any] = logging.get_logger(__name__) enable_full_determinism() class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =UNetaDModel __a ='sample' @property def UpperCamelCase__ ( self : Tuple ): _a = 4 _a = 3 _a = (32, 32) _a = floats_tensor((batch_size, num_channels) + sizes ).to(__a ) _a = torch.tensor([10] ).to(__a ) return {"sample": noise, "timestep": time_step} @property def UpperCamelCase__ ( self : List[Any] ): return (3, 32, 32) @property def UpperCamelCase__ ( self : Optional[Any] ): return (3, 32, 32) def UpperCamelCase__ ( self : Union[str, Any] ): _a = { "block_out_channels": (32, 64), "down_block_types": ("DownBlock2D", "AttnDownBlock2D"), "up_block_types": ("AttnUpBlock2D", "UpBlock2D"), "attention_head_dim": 3, "out_channels": 3, "in_channels": 3, "layers_per_block": 2, "sample_size": 32, } _a = self.dummy_input return init_dict, inputs_dict class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =UNetaDModel __a ='sample' @property def UpperCamelCase__ ( self : List[Any] ): _a = 4 _a = 4 _a = (32, 32) _a = floats_tensor((batch_size, num_channels) + sizes ).to(__a ) _a = torch.tensor([10] ).to(__a ) return {"sample": noise, "timestep": time_step} @property def UpperCamelCase__ ( self : List[str] ): return (4, 32, 32) @property def UpperCamelCase__ ( self : List[Any] ): return (4, 32, 32) def UpperCamelCase__ ( self : Optional[int] ): _a = { "sample_size": 32, "in_channels": 4, "out_channels": 4, "layers_per_block": 2, "block_out_channels": (32, 64), "attention_head_dim": 32, "down_block_types": ("DownBlock2D", "DownBlock2D"), "up_block_types": ("UpBlock2D", "UpBlock2D"), } _a = self.dummy_input return init_dict, inputs_dict def UpperCamelCase__ ( self : str ): _a , _a = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=__a ) self.assertIsNotNone(__a ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__a ) _a = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" ) def UpperCamelCase__ ( self : int ): _a , _a = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=__a ) model.to(__a ) _a = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" ) def UpperCamelCase__ ( self : Any ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _a , _a = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=__a ) model_accelerate.to(__a ) model_accelerate.eval() _a = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _a = noise.to(__a ) _a = torch.tensor([10] * noise.shape[0] ).to(__a ) _a = model_accelerate(__a , __a )["sample"] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _a , _a = 
UNetaDModel.from_pretrained( "fusing/unet-ldm-dummy-update" , output_loading_info=__a , low_cpu_mem_usage=__a ) model_normal_load.to(__a ) model_normal_load.eval() _a = model_normal_load(__a , __a )["sample"] assert torch_all_close(__a , __a , rtol=1e-3 ) def UpperCamelCase__ ( self : Dict ): _a = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" ) model.eval() model.to(__a ) _a = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _a = noise.to(__a ) _a = torch.tensor([10] * noise.shape[0] ).to(__a ) with torch.no_grad(): _a = model(__a , __a ).sample _a = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _a = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] ) # fmt: on self.assertTrue(torch_all_close(__a , __a , rtol=1e-3 ) ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =UNetaDModel __a ='sample' @property def UpperCamelCase__ ( self : Union[str, Any] , __a : List[Any]=(32, 32) ): _a = 4 _a = 3 _a = floats_tensor((batch_size, num_channels) + sizes ).to(__a ) _a = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__a ) return {"sample": noise, "timestep": time_step} @property def UpperCamelCase__ ( self : List[Any] ): return (3, 32, 32) @property def UpperCamelCase__ ( self : Union[str, Any] ): return (3, 32, 32) def UpperCamelCase__ ( self : List[str] ): _a = { "block_out_channels": [32, 64, 64, 64], "in_channels": 3, "layers_per_block": 1, "out_channels": 3, "time_embedding_type": "fourier", "norm_eps": 1e-6, "mid_block_scale_factor": math.sqrt(2.0 ), "norm_num_groups": None, "down_block_types": [ "SkipDownBlock2D", "AttnSkipDownBlock2D", "SkipDownBlock2D", "SkipDownBlock2D", ], "up_block_types": [ "SkipUpBlock2D", "SkipUpBlock2D", "AttnSkipUpBlock2D", "SkipUpBlock2D", ], } _a = self.dummy_input return init_dict, inputs_dict @slow def UpperCamelCase__ ( self : Optional[Any] ): _a , _a = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=__a ) self.assertIsNotNone(__a ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__a ) _a = self.dummy_input _a = floats_tensor((4, 3) + (2_56, 2_56) ).to(__a ) _a = noise _a = model(**__a ) assert image is not None, "Make sure output is not None" @slow def UpperCamelCase__ ( self : Union[str, Any] ): _a = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" ) model.to(__a ) _a = 4 _a = 3 _a = (2_56, 2_56) _a = torch.ones((batch_size, num_channels) + sizes ).to(__a ) _a = torch.tensor(batch_size * [1e-4] ).to(__a ) with torch.no_grad(): _a = model(__a , __a ).sample _a = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _a = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] ) # fmt: on self.assertTrue(torch_all_close(__a , __a , rtol=1e-2 ) ) def UpperCamelCase__ ( self : Dict ): _a = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" ) model.to(__a ) _a = 4 _a = 3 _a = (32, 32) _a = torch.ones((batch_size, num_channels) + sizes ).to(__a ) _a = torch.tensor(batch_size * [1e-4] ).to(__a ) with torch.no_grad(): _a = model(__a , __a ).sample _a = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _a = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] ) # fmt: on self.assertTrue(torch_all_close(__a , __a , rtol=1e-2 ) ) def UpperCamelCase__ ( self : Tuple ): 
# not required for this model pass
63
'''simple docstring'''

from ....utils import logging


lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)


class __SCREAMING_SNAKE_CASE(lowerCamelCase_):
    """simple docstring"""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
63
1
'''simple docstring''' import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : List[Any] , __a : List[str] , __a : Tuple=13 , __a : List[Any]=30 , __a : int=2 , __a : Optional[Any]=3 , __a : List[str]=True , __a : List[Any]=True , __a : Tuple=32 , __a : Optional[int]=5 , __a : Any=4 , __a : Dict=37 , __a : Dict="gelu" , __a : int=0.1 , __a : Dict=0.1 , __a : List[Any]=10 , __a : Union[str, Any]=0.02 , __a : Dict=None , ): _a = parent _a = batch_size _a = image_size _a = patch_size _a = num_channels _a = is_training _a = use_labels _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = type_sequence_label_size _a = initializer_range _a = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _a = (image_size // patch_size) ** 2 _a = num_patches + 1 def UpperCamelCase__ ( self : Tuple ): _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self : Tuple ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def UpperCamelCase__ ( self : Tuple , __a : str , __a : Dict , __a : Any ): _a = ViTMSNModel(config=__a ) model.to(__a ) model.eval() _a = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : str , __a : Tuple ): _a = self.type_sequence_label_size _a = ViTMSNForImageClassification(__a ) model.to(__a ) model.eval() _a = model(__a , labels=__a ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _a = 1 _a = ViTMSNForImageClassification(__a ) model.to(__a ) model.eval() _a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase__ ( self : Optional[int] ): _a = self.prepare_config_and_inputs() _a , _a , _a = config_and_inputs _a = 
{"pixel_values": pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =(ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __a =( {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification} if is_torch_available() else {} ) __a =False __a =False __a =False __a =False def UpperCamelCase__ ( self : Optional[int] ): _a = ViTMSNModelTester(self ) _a = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def UpperCamelCase__ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def UpperCamelCase__ ( self : List[str] ): pass def UpperCamelCase__ ( self : Union[str, Any] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def UpperCamelCase__ ( self : Optional[int] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__a ) _a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def UpperCamelCase__ ( self : Optional[int] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def UpperCamelCase__ ( self : int ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def UpperCamelCase__ ( self : List[str] ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a = ViTMSNModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def _lowerCamelCase ( ) -> Optional[Any]: _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase__ ( self : Dict ): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def UpperCamelCase__ ( self : str ): torch.manual_seed(2 ) _a = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__a ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=__a , return_tensors="pt" ).to(__a ) # forward pass with torch.no_grad(): _a = model(**__a ) # verify the logits _a = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __a ) _a = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
63
'''simple docstring'''


def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
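# Illustrative sanity check for solution() above: for n = 10 the sum
# 1 + ... + 10 is 55 and the sum of squares is 385, so the difference
# must be 55**2 - 385 = 2640.
assert solution(10) == 2640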
63
1
'''simple docstring'''

from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x: int) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
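# Illustrative check for line_length(): a straight line y = x from 0 to 1 has
# exact length sqrt(2), and the polyline approximation reproduces it for any
# step count, so the error should stay at float-rounding level.
assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-9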
63
'''simple docstring'''


def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
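# Illustrative examples for is_palindrome() above (the function name is a
# restoration, so treat it as assumed):
#     is_palindrome(121)  -> True
#     is_palindrome(-121) -> False  (negative numbers are rejected up front)
#     is_palindrome(10)   -> False  (a trailing zero cannot be mirrored)
assert is_palindrome(121) and not is_palindrome(-121) and not is_palindrome(10)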
63
1
'''simple docstring'''


class SubArray:
    def __init__(self, arr):
        # we need a list, not a string, so split the input on commas
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane's algorithm: sum_value[i] holds the best sum of a subarray
        # ending at i, rear[i] the best sum seen anywhere up to i
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print("the result is:", re)
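# Illustrative use of SubArray with Kadane's classic example (hypothetical
# input): the maximum-sum contiguous subarray of
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] is [4, -1, 2, 1], with sum 6.
assert SubArray("-2,1,-3,4,-1,2,1,-5,4").solve_sub_array() == 6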
63
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase_ : int = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Optional[int] = ['GPTNeoXTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : List[str] = [ 'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoXForCausalLM', 'GPTNeoXForQuestionAnswering', 'GPTNeoXForSequenceClassification', 'GPTNeoXForTokenClassification', 'GPTNeoXLayer', 'GPTNeoXModel', 'GPTNeoXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
63
1
'''simple docstring'''


def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
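# Worked examples for twos_complement() above (illustrative; the function name
# is an assumed restoration): the minimal two's complement of -1 is 0b11
# (two bits) and of -5 is 0b1011 (four bits).
assert twos_complement(-1) == "0b11"
assert twos_complement(-5) == "0b1011"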
63
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase_ : Any = get_tests_dir('fixtures') lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json') class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] ): _a = 0 def UpperCamelCase__ ( self : str ): _a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: _a = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _a = AutoFeatureExtractor.from_pretrained(__a ).to_dict() config_dict.pop("feature_extractor_type" ) _a = WavaVecaFeatureExtractor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) # make sure private variable is not incorrectly saved _a = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Union[str, Any] ): with self.assertRaisesRegex( __a , "bert-base is not a local folder and is not a valid model identifier" ): _a = AutoFeatureExtractor.from_pretrained("bert-base" ) def UpperCamelCase__ ( self : Optional[Any] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" ) def UpperCamelCase__ ( self : List[Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): _a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def UpperCamelCase__ ( self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def UpperCamelCase__ ( self : Any ): try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoFeatureExtractor.register(__a , __a ) # Now that the config is registered, it can be used as any other config with the auto-API _a = CustomFeatureExtractor.from_pretrained(__a ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__ ( self : Tuple ): class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =True try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # If remote code is not set, the default is to use local _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
63
1
'''simple docstring'''

import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
63
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : int = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='gpt_bigcode' __a =['past_key_values'] __a ={ 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Optional[Any] , __a : Tuple=5_02_57 , __a : str=10_24 , __a : Dict=7_68 , __a : Tuple=12 , __a : str=12 , __a : Optional[int]=None , __a : Dict="gelu_pytorch_tanh" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=0.1 , __a : Tuple=1e-5 , __a : str=0.02 , __a : Dict=True , __a : Union[str, Any]=True , __a : Optional[int]=5_02_56 , __a : Optional[int]=5_02_56 , __a : Union[str, Any]=True , __a : Dict=True , __a : Union[str, Any]=True , **__a : List[Any] , ): _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = scale_attn_weights _a = use_cache _a = attention_softmax_in_fpaa _a = scale_attention_softmax_in_fpaa _a = multi_query _a = bos_token_id _a = eos_token_id super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
63
1
'''simple docstring''' import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Tuple , __a : List[str] , __a : str=13 , __a : List[str]=7 , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : List[str]=True , __a : Optional[int]=99 , __a : Union[str, Any]=32 , __a : Optional[Any]=5 , __a : Union[str, Any]=4 , __a : Dict=37 , __a : List[str]="gelu" , __a : Optional[Any]=0.1 , __a : List[Any]=0.1 , __a : Tuple=1_28 , __a : int=32 , __a : int=16 , __a : Dict=2 , __a : Dict=0.02 , __a : Any=3 , __a : Dict=4 , __a : Any=None , ): _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_input_mask _a = use_token_type_ids _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = type_sequence_label_size _a = initializer_range _a = num_labels _a = num_choices _a = scope def UpperCamelCase__ ( self : List[Any] ): _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = None if self.use_input_mask: _a = random_attention_mask([self.batch_size, self.seq_length] ) _a = None if self.use_token_type_ids: _a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a = None _a = None _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a = ids_tensor([self.batch_size] , self.num_choices ) _a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self : List[str] ): return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) def UpperCamelCase__ ( self : List[str] ): ( ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ) = self.prepare_config_and_inputs() _a = True _a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, 
encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase__ ( self : Tuple , __a : Optional[Any] , __a : Tuple , __a : Tuple , __a : Union[str, Any] , __a : Tuple , __a : List[Any] , __a : str ): _a = NezhaModel(config=__a ) model.to(__a ) model.eval() _a = model(__a , attention_mask=__a , token_type_ids=__a ) _a = model(__a , token_type_ids=__a ) _a = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ ( self : Optional[Any] , __a : Tuple , __a : List[Any] , __a : Any , __a : List[str] , __a : int , __a : Dict , __a : Optional[int] , __a : Dict , __a : int , ): _a = True _a = NezhaModel(__a ) model.to(__a ) model.eval() _a = model( __a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , ) _a = model( __a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , ) _a = model(__a , attention_mask=__a , token_type_ids=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ ( self : Optional[Any] , __a : Optional[Any] , __a : Dict , __a : str , __a : Any , __a : Dict , __a : Optional[int] , __a : List[str] ): _a = NezhaForMaskedLM(config=__a ) model.to(__a ) model.eval() _a = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self : List[Any] , __a : Union[str, Any] , __a : str , __a : List[Any] , __a : Optional[int] , __a : Any , __a : Optional[int] , __a : Union[str, Any] ): _a = NezhaForNextSentencePrediction(config=__a ) model.to(__a ) model.eval() _a = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCamelCase__ ( self : List[str] , __a : Optional[Any] , __a : Union[str, Any] , __a : Any , __a : Any , __a : Optional[int] , __a : Any , __a : Union[str, Any] ): _a = NezhaForPreTraining(config=__a ) model.to(__a ) model.eval() _a = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCamelCase__ ( self : Any , __a : int , __a : Optional[int] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[str] , __a : List[Any] ): _a = NezhaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() _a = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self : Union[str, Any] , __a : List[str] , __a : List[Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : List[Any] , __a : Any ): _a = self.num_labels _a = NezhaForSequenceClassification(__a ) model.to(__a ) model.eval() _a = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self : str , __a : str , __a : int , __a : Optional[Any] , __a : Tuple , __a : Tuple , __a : str , __a : List[str] ): _a = self.num_labels _a = NezhaForTokenClassification(config=__a ) model.to(__a ) model.eval() _a = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self : Union[str, Any] , __a : List[str] , __a : str , __a : Optional[int] , __a : int , __a : Any , __a : Dict , __a : Optional[int] ): _a = self.num_choices _a = NezhaForMultipleChoice(config=__a ) model.to(__a ) model.eval() _a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ ( self : Any ): _a = self.prepare_config_and_inputs() ( ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ) = config_and_inputs _a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) __a =( { 'feature-extraction': NezhaModel, 'fill-mask': NezhaForMaskedLM, 'question-answering': NezhaForQuestionAnswering, 'text-classification': NezhaForSequenceClassification, 'token-classification': NezhaForTokenClassification, 'zero-shot': NezhaForSequenceClassification, } if is_torch_available() else {} ) __a =True def UpperCamelCase__ ( self : Dict , __a : Union[str, Any] , __a : Any , __a : Optional[int]=False ): _a = super()._prepare_for_class(__a , __a , return_labels=__a ) if return_labels: if model_class in get_values(__a ): _a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a ) _a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a ) return inputs_dict def UpperCamelCase__ ( self : int ): _a = NezhaModelTester(self ) _a = ConfigTester(self , config_class=__a , hidden_size=37 ) def UpperCamelCase__ ( self : int ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self : Union[str, Any] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def UpperCamelCase__ ( self : Union[str, Any] ): _a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__a ) def UpperCamelCase__ ( self : str ): # This regression test was failing with PyTorch < 1.3 ( ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() _a = None self.model_tester.create_and_check_model_as_decoder( __a , __a , __a , __a , __a , __a , __a , __a , __a , ) def UpperCamelCase__ ( self : Optional[int] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def 
UpperCamelCase__ ( self : Any ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__a ) def UpperCamelCase__ ( self : Tuple ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*__a ) def UpperCamelCase__ ( self : Tuple ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__a ) def UpperCamelCase__ ( self : Tuple ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a ) def UpperCamelCase__ ( self : Optional[Any] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a ) def UpperCamelCase__ ( self : Dict ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def UpperCamelCase__ ( self : int ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a = NezhaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @slow @require_torch_gpu def UpperCamelCase__ ( self : List[str] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return _a = True _a = model_class(config=__a ) _a = self._prepare_for_class(__a , __a ) _a = torch.jit.trace( __a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__a , os.path.join(__a , "bert.pt" ) ) _a = torch.jit.load(os.path.join(__a , "bert.pt" ) , map_location=__a ) loaded(inputs_dict["input_ids"].to(__a ) , inputs_dict["attention_mask"].to(__a ) ) @require_torch class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" @slow def UpperCamelCase__ ( self : List[Any] ): _a = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" ) _a = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _a = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _a = model(__a , attention_mask=__a )[0] _a = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape , __a ) _a = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) ) @slow def UpperCamelCase__ ( self : List[str] ): _a = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" ) _a = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _a = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _a = model(__a , attention_mask=__a )[0] _a = torch.Size((1, 6, 2_11_28) ) self.assertEqual(output.shape , __a ) _a = torch.tensor( [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
63
'''simple docstring'''


def perfect_cube(n: int) -> bool:
    # Round the cube root before re-cubing: comparing the raw float directly
    # fails for many inputs because of floating-point error, e.g.
    # 27 ** (1 / 3) == 3.0000000000000004, so (val * val * val) != 27.
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
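# Illustrative checks for the rounded perfect_cube() above: 27 = 3**3 and
# 345**3 = 41_063_625 are cubes, 4 is not; taking abs() also lets negative
# cubes such as -27 report True.
assert perfect_cube(27) and perfect_cube(345**3) and not perfect_cube(4)
assert perfect_cube(-27)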
63
1
'''simple docstring'''


def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than or equal to the finish
        # time of the previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
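# Worked trace of the greedy choice above for the demo data: activity 0
# (finish 2) is taken first, then 1 (start 3 >= 2), then 3 (5 >= 4), then
# 4 (8 >= 7); activity 5 (start 5 < 9) is skipped, so the script prints
# "0,1,3,4,".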
63
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : Optional[int] = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='deta' __a ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) _a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(__a , __a ): _a = backbone_config.pop("model_type" ) _a = CONFIG_MAPPING[backbone_model_type] _a = config_class.from_dict(__a ) _a = backbone_config _a = num_queries _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = init_xavier_std _a = encoder_layerdrop _a = auxiliary_loss _a = position_embedding_type # deformable attributes _a = num_feature_levels _a = encoder_n_points _a = decoder_n_points _a = two_stage _a = two_stage_num_proposals _a = with_box_refine _a = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher _a = class_cost _a = bbox_cost _a = giou_cost # Loss coefficients _a = mask_loss_coefficient _a = dice_loss_coefficient _a = bbox_loss_coefficient _a = giou_loss_coefficient _a = eos_coefficient _a = focal_alpha super().__init__(is_encoder_decoder=__a , **__a ) @property def UpperCamelCase__ ( self : Optional[Any] ): return self.encoder_attention_heads @property def UpperCamelCase__ ( self : Dict ): return self.d_model def UpperCamelCase__ ( self : List[str] ): _a = copy.deepcopy(self.__dict__ ) _a = self.backbone_config.to_dict() _a = self.__class__.model_type return output
63
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __SCREAMING_SNAKE_CASE : """simple docstring""" __a =BlenderbotConfig __a ={} __a ='gelu' def __init__( self : Dict , __a : str , __a : Dict=13 , __a : List[str]=7 , __a : List[Any]=True , __a : Tuple=False , __a : Union[str, Any]=99 , __a : List[str]=32 , __a : int=2 , __a : Any=4 , __a : str=37 , __a : Tuple=0.1 , __a : Dict=0.1 , __a : int=20 , __a : List[Any]=2 , __a : int=1 , __a : Optional[int]=0 , ): _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = eos_token_id _a = pad_token_id _a = bos_token_id def UpperCamelCase__ ( self : Tuple ): _a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _a = tf.concat([input_ids, eos_tensor] , axis=1 ) _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _a = prepare_blenderbot_inputs_dict(__a , __a , __a ) return config, inputs_dict def UpperCamelCase__ ( self : str , __a : int , __a : Tuple ): _a = TFBlenderbotModel(config=__a ).get_decoder() _a = inputs_dict["input_ids"] _a = input_ids[:1, :] _a = inputs_dict["attention_mask"][:1, :] _a = inputs_dict["head_mask"] _a = 1 # first forward pass _a = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) _a , _a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _a = ids_tensor((self.batch_size, 3) , config.vocab_size ) _a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _a = tf.concat([input_ids, next_tokens] , axis=-1 ) _a = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _a = model(__a , attention_mask=__a )[0] _a = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _a = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _a = output_from_no_past[:, -3:, random_slice_idx] _a = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1e-3 ) def _lowerCamelCase ( 
lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : Union[str, Any]=None , lowercase : Optional[int]=None , lowercase : Tuple=None , lowercase : Any=None , lowercase : Union[str, Any]=None , ) -> List[Any]: if attention_mask is None: _a = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _a = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _a = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __a =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __a =( { 'conversational': TFBlenderbotForConditionalGeneration, 'feature-extraction': TFBlenderbotModel, 'summarization': TFBlenderbotForConditionalGeneration, 'text2text-generation': TFBlenderbotForConditionalGeneration, 'translation': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __a =True __a =False __a =False def UpperCamelCase__ ( self : Tuple ): _a = TFBlenderbotModelTester(self ) _a = ConfigTester(self , config_class=__a ) def UpperCamelCase__ ( self : int ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self : int ): _a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) @require_tokenizers @require_tf class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" __a =['My friends are cool but they eat too many carbs.'] __a ='facebook/blenderbot-400M-distill' @cached_property def UpperCamelCase__ ( self : Optional[int] ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCamelCase__ ( self : Union[str, Any] ): _a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ ( self : str ): _a = self.tokenizer(self.src_text , return_tensors="tf" ) _a = self.model.generate( model_inputs.input_ids , ) _a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
63
'''simple docstring''' import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : int , lowercase : int=1024 , lowercase : int=1024 , lowercase : Tuple=False , **lowercase : Optional[int] ) -> Union[str, Any]: _a = AutoTokenizer.from_pretrained(lowercase ) _a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="train" , **lowercase ) _a = tok.pad_token_id def get_lens(lowercase : Optional[int] ): _a = tqdm( DataLoader(lowercase , batch_size=512 , num_workers=8 , shuffle=lowercase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) _a = [] for batch in dl: _a = batch["input_ids"].ne(lowercase ).sum(1 ).tolist() _a = batch["labels"].ne(lowercase ).sum(1 ).tolist() if consider_target: for src, tgt in zip(lowercase , lowercase ): max_lens.append(max(lowercase , lowercase ) ) else: max_lens.extend(lowercase ) return max_lens _a = get_lens(lowercase ) _a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="val" , **lowercase ) _a = get_lens(lowercase ) pickle_save(lowercase , train_ds.len_file ) pickle_save(lowercase , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
63
1
'''simple docstring''' import math class __SCREAMING_SNAKE_CASE : """simple docstring""" def UpperCamelCase__ ( self : List[str] , __a : list[list[float]] , __a : list[int] ): _a = 0.0 _a = 0.0 for i in range(len(__a ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def UpperCamelCase__ ( self : List[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ): for i in range(len(__a ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def _lowerCamelCase ( ) -> None: # Training Examples ( m, n ) _a = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _a = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _a = SelfOrganizingMap() _a = 3 _a = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _a = training_samples[j] # Compute the winning vector _a = self_organizing_map.get_winner(lowercase , lowercase ) # Update the winning vector _a = self_organizing_map.update(lowercase , lowercase , lowercase , lowercase ) # classify test sample _a = [0, 0, 0, 1] _a = self_organizing_map.get_winner(lowercase , lowercase ) # results print(F'Clusters that the test sample belongs to : {winner}' ) print(F'Weights that have been trained : {weights}' ) # running the main() function if __name__ == "__main__": main()
63
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): _a = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): # pass variant but use the non-variant filenames _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Dict ): _a = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : List[str] ): # pass variant but use the non-variant filenames _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] _a = "fp16" 
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[int] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
63
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase_ : int = logging.get_logger(__name__) lowerCAmelCase_ : Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase_ : Union[str, Any] = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, 'tokenizer_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json', }, } lowerCAmelCase_ : Any = { 'facebook/bart-base': 10_24, 'facebook/bart-large': 10_24, 'facebook/bart-large-mnli': 10_24, 'facebook/bart-large-cnn': 10_24, 'facebook/bart-large-xsum': 10_24, 'yjernite/bart_eli5': 10_24, } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =VOCAB_FILES_NAMES __a =PRETRAINED_VOCAB_FILES_MAP __a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a =['input_ids', 'attention_mask'] __a =BartTokenizer def __init__( self : List[Any] , __a : Tuple=None , __a : str=None , __a : int=None , __a : Optional[Any]="replace" , __a : List[str]="<s>" , __a : str="</s>" , __a : Optional[Any]="</s>" , __a : List[str]="<s>" , __a : List[str]="<unk>" , __a : List[Any]="<pad>" , __a : Optional[Any]="<mask>" , __a : Tuple=False , __a : Union[str, Any]=True , **__a : Optional[int] , ): super().__init__( __a , __a , tokenizer_file=__a , errors=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , trim_offsets=__a , **__a , ) _a = 
json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __a ) != add_prefix_space: _a = getattr(__a , pre_tok_state.pop("type" ) ) _a = add_prefix_space _a = pre_tok_class(**__a ) _a = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _a = "post_processor" _a = getattr(self.backend_tokenizer , __a , __a ) if tokenizer_component_instance: _a = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _a = tuple(state["sep"] ) if "cls" in state: _a = tuple(state["cls"] ) _a = False if state.get("add_prefix_space" , __a ) != add_prefix_space: _a = add_prefix_space _a = True if state.get("trim_offsets" , __a ) != trim_offsets: _a = trim_offsets _a = True if changes_to_apply: _a = getattr(__a , state.pop("type" ) ) _a = component_class(**__a ) setattr(self.backend_tokenizer , __a , __a ) @property def UpperCamelCase__ ( self : Any ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def UpperCamelCase__ ( self : str , __a : Dict ): _a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else value _a = value def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : Any ): _a = kwargs.get("is_split_into_words" , __a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__a , **__a ) def UpperCamelCase__ ( self : Optional[int] , *__a : str , **__a : Optional[Any] ): _a = kwargs.get("is_split_into_words" , __a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*__a , **__a ) def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ): _a = self._tokenizer.model.save(__a , name=__a ) return tuple(__a ) def UpperCamelCase__ ( self : Union[str, Any] , __a : List[str] , __a : Tuple=None ): _a = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCamelCase__ ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None ): _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
63
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
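As a usage sketch of the two functions above, encoding and decoding should round-trip; the byte string here is illustrative, not taken from the original file:

data = b"Hello World!"
encoded = base16_encode(data)  # '48656C6C6F20576F726C6421'
assert base16_decode(encoded) == data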
63
1
'''simple docstring'''
def upper(word: str) -> str:
    # Shift lowercase ASCII letters down by 32 code points; leave everything else unchanged.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
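A quick sanity check for the function above (`upper` is the name restored here; the sample string is illustrative):

assert upper("Wow, this is fun!") == "WOW, THIS IS FUN!"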
63
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any: model.train() _a = model(lowercase ) _a = F.mse_loss(lowercase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase ) def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]: set_seed(42 ) _a = RegressionModel() _a = deepcopy(lowercase ) _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) model.to(accelerator.device ) if sched: _a = AdamW(params=model.parameters() , lr=1E-3 ) _a = AdamW(params=ddp_model.parameters() , lr=1E-3 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) # Make a copy of `model` if sched: _a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase ) else: _a , _a = accelerator.prepare(lowercase , lowercase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]: # Test when on a single CPU or GPU that the context manager does nothing _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase , lowercase , lowercase , lowercase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad 
({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : Tuple ) -> Tuple: # Test on distributed setup that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] GradientState._reset_state() def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , 
gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' _a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase )) if accelerator.num_processes > 1: check_model_parameters(lowercase , lowercase , lowercase , lowercase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def _lowerCamelCase ( ) -> Any: _a = Accelerator() _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) _a = RegressionDataset(length=96 ) _a = DataLoader(lowercase , batch_size=16 ) _a , _a = accelerator.prepare(lowercase , lowercase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if iteration < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if batch_num < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _lowerCamelCase ( ) -> Optional[Any]: _a = Accelerator() _a = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(lowercase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(lowercase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(lowercase , lowercase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( 
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase ) def _lowerCamelCase ( lowercase : Any ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
63
1
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__) @dataclass class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =[ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : Union[str, Any] , **__a : Dict ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _a = deprecated_arg[3:] _a = not kwargs.pop(__a ) logger.warning( f'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' f' {positive_arg}={kwargs[positive_arg]}' ) _a = kwargs.pop("tpu_name" , self.tpu_name ) _a = kwargs.pop("device_idx" , self.device_idx ) _a = kwargs.pop("eager_mode" , self.eager_mode ) _a = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**__a ) __a =field( default=lowerCamelCase_ , metadata={'help': 'Name of TPU'} , ) __a =field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) __a =field(default=lowerCamelCase_ , metadata={'help': 'Benchmark models in eager model.'} ) __a =field( default=lowerCamelCase_ , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def UpperCamelCase__ ( self : List[str] ): requires_backends(self , ["tf"] ) _a = None if self.tpu: try: if self.tpu_name: _a = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: _a = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: _a = None return tpu @cached_property def UpperCamelCase__ ( self : Optional[int] ): requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) _a = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) _a = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU _a = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' ) return strategy @property def UpperCamelCase__ ( self : List[Any] ): requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def UpperCamelCase__ ( self : str ): requires_backends(self , ["tf"] ) return self._setup_strategy @property def UpperCamelCase__ ( self : Tuple ): requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def UpperCamelCase__ ( self : Tuple ): requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def UpperCamelCase__ ( self : Tuple ): return self.n_gpu > 0
63
63
1
'''simple docstring'''
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    # Walk inwards from both ends, comparing characters.
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
63
'''simple docstring'''
import argparse
import os
import re

TRANSFORMERS_PATH = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(TRANSFORMERS_PATH, f) for f in os.listdir(TRANSFORMERS_PATH) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
63
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer lowerCAmelCase_ : Any = logging.get_logger(__name__) lowerCAmelCase_ : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase_ : List[str] = [ 'small', 'small-base', 'medium', 'medium-base', 'intermediate', 'intermediate-base', 'large', 'large-base', 'xlarge', 'xlarge-base', ] lowerCAmelCase_ : str = { 'vocab_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json', 'funnel-transformer/small-base': ( 'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json' ), 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json', 'funnel-transformer/medium-base': ( 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json', 'funnel-transformer/large-base': ( 'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json' ), 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json', 'funnel-transformer/xlarge-base': ( 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json' ), }, } lowerCAmelCase_ : List[Any] = {f"""funnel-transformer/{name}""": 5_12 for name in _model_names} lowerCAmelCase_ : List[Any] = {f"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names} class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =VOCAB_FILES_NAMES __a =PRETRAINED_VOCAB_FILES_MAP __a =PRETRAINED_INIT_CONFIGURATION __a =FunnelTokenizer __a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a =2 def __init__( self : str , __a : Optional[Any]=None , __a : Optional[Any]=None , __a : List[str]=True , __a : str="<unk>" , __a : List[Any]="<sep>" , __a : Optional[int]="<pad>" 
, __a : str="<cls>" , __a : Union[str, Any]="<mask>" , __a : Dict="<s>" , __a : Tuple="</s>" , __a : Union[str, Any]=True , __a : str=True , __a : int=None , __a : int="##" , **__a : List[Any] , ): super().__init__( __a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , bos_token=__a , eos_token=__a , clean_text=__a , tokenize_chinese_chars=__a , strip_accents=__a , wordpieces_prefix=__a , **__a , ) _a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , __a ) != do_lower_case or normalizer_state.get("strip_accents" , __a ) != strip_accents or normalizer_state.get("handle_chinese_chars" , __a ) != tokenize_chinese_chars ): _a = getattr(__a , normalizer_state.pop("type" ) ) _a = do_lower_case _a = strip_accents _a = tokenize_chinese_chars _a = normalizer_class(**__a ) _a = do_lower_case def UpperCamelCase__ ( self : Any , __a : Any , __a : Dict=None ): _a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ ( self : str , __a : List[int] , __a : Optional[List[int]] = None ): _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self : str , __a : str , __a : Optional[str] = None ): _a = self._tokenizer.model.save(__a , name=__a ) return tuple(__a )
63
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
63
1
'''simple docstring'''
import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
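As a usage sketch of the converter above when called programmatically rather than via the CLI; the paths are hypothetical placeholders, not values from the original script:

from pathlib import Path

checkpoint_dir = "openai_gpt_tf_checkpoint"  # hypothetical input path
dump_dir = "openai_gpt_pytorch"  # hypothetical output path
Path(dump_dir).mkdir(parents=True, exist_ok=True)
# An empty config path makes the converter fall back to the default OpenAIGPTConfig.
convert_openai_checkpoint_to_pytorch(checkpoint_dir, "", dump_dir)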
63
'''simple docstring'''
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim: int,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
63
1
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
63
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _lowerCamelCase ( lowercase : Dict ) -> Any: _a = filter(lambda lowercase : p.requires_grad , model.parameters() ) _a = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_ : int = logging.getLogger(__name__) def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any ) -> Any: if metric == "rouge2": _a = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": _a = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": _a = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' " function." ) _a = ModelCheckpoint( dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Optional[int] ) -> Union[str, Any]: return EarlyStopping( monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , ) class __SCREAMING_SNAKE_CASE (pl.Callback ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): _a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__a ) @rank_zero_only def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Tuple=True ): logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' ) _a = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results _a = Path(pl_module.hparams.output_dir ) if type_path == "test": _a = od / "test_results.txt" _a = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt' _a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=__a ) generations_file.parent.mkdir(exist_ok=__a ) with open(__a , "a+" ) as writer: for key in sorted(__a ): if key in ["log", "progress_bar", "preds"]: continue _a = metrics[key] if isinstance(__a , torch.Tensor ): _a = val.item() _a = f'{key}: {val:.6f}\n' writer.write(__a ) if not save_generations: return if "preds" in metrics: _a = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__a ) @rank_zero_only def UpperCamelCase__ ( self : int , __a : List[Any] , __a : Union[str, Any] ): try: _a = pl_module.model.model.num_parameters() except AttributeError: _a = pl_module.model.num_parameters() _a = count_trainable_parameters(__a ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} ) @rank_zero_only def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__a , __a , "test" ) @rank_zero_only def UpperCamelCase__ ( self : Any , __a : pl.Trainer , __a : int ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
63
1
'''simple docstring''' import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer lowerCAmelCase_ : List[str] = logging.get_logger(__name__) lowerCAmelCase_ : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase_ : Any = { 'vocab_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json', }, 'merges_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt', }, 'tokenizer_file': { 'Salesforce/codegen-350M-mono': ( 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json' ), }, } lowerCAmelCase_ : str = { 'Salesforce/codegen-350M-mono': 20_48, } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =VOCAB_FILES_NAMES __a =PRETRAINED_VOCAB_FILES_MAP __a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a =['input_ids', 'attention_mask'] __a =CodeGenTokenizer def __init__( self : Optional[Any] , __a : Optional[int]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : List[str]="<|endoftext|>" , __a : Union[str, Any]="<|endoftext|>" , __a : int="<|endoftext|>" , __a : Dict=False , **__a : Any , ): super().__init__( __a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , add_prefix_space=__a , **__a , ) if kwargs.pop("add_bos_token" , __a ): _a = kwargs.pop("name_or_path" , "" ) raise ValueError( "Currenty GPT2's fast tokenizer does NOT support adding a BOS token." "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n" f'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n' f'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n' "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." " so that the fast tokenizer works correctly." ) _a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __a ) != add_prefix_space: _a = getattr(__a , pre_tok_state.pop("type" ) ) _a = add_prefix_space _a = pre_tok_class(**__a ) _a = add_prefix_space def UpperCamelCase__ ( self : Any , *__a : int , **__a : Any ): _a = kwargs.get("is_split_into_words" , __a ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__a , **__a ) def UpperCamelCase__ ( self : str , *__a : Optional[Any] , **__a : Union[str, Any] ): _a = kwargs.get("is_split_into_words" , __a ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*__a , **__a ) def UpperCamelCase__ ( self : Dict , __a : str , __a : Optional[str] = None ): _a = self._tokenizer.model.save(__a , name=__a ) return tuple(__a ) def UpperCamelCase__ ( self : Optional[Any] , __a : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , __a : bool = False , __a : bool = None , __a : Optional[List[str]] = None , **__a : Union[str, Any] , ): _a = super().decode( token_ids=__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , **__a , ) if truncate_before_pattern is not None and len(__a ) > 0: _a = self.truncate(__a , __a ) return decoded_text def UpperCamelCase__ ( self : List[str] , __a : int , __a : Dict ): def find_re(__a : Optional[int] , __a : Any , __a : List[str] ): _a = pattern.search(__a , __a ) return m.start() if m else -1 _a = [re.compile(__a , re.MULTILINE ) for pattern in truncate_before_pattern] _a = list(re.finditer("^print" , __a , re.MULTILINE ) ) if len(__a ) > 1: _a = completion[: prints[1].start()] _a = list(re.finditer("^def" , __a , re.MULTILINE ) ) if len(__a ) > 1: _a = completion[: defs[1].start()] _a = 0 _a = [ pos for pos in [find_re(__a , __a , __a ) for terminal in terminals] if pos != -1 ] if len(__a ) > 0: return completion[: min(__a )] else: return completion
63
63
1
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ : List[Any] = logging.get_logger(__name__) lowerCAmelCase_ : str = '▁' lowerCAmelCase_ : Dict = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'} lowerCAmelCase_ : str = { 'sentencepiece_model_file': 'sentencepiece.bpe.model', 'vocab_file': 'vocab.txt', } lowerCAmelCase_ : Any = { 'vocab_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', }, 'sentencepiece_model_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', }, } lowerCAmelCase_ : Any = { 'ernie-m-base': 5_14, 'ernie-m-large': 5_14, } lowerCAmelCase_ : Any = { 'ernie-m-base': {'do_lower_case': False}, 'ernie-m-large': {'do_lower_case': False}, } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =["input_ids"] __a =VOCAB_FILES_NAMES __a =PRETRAINED_INIT_CONFIGURATION __a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a =PRETRAINED_VOCAB_FILES_MAP __a =RESOURCE_FILES_NAMES def __init__( self : Tuple , __a : Optional[Any] , __a : Union[str, Any]=None , __a : int=False , __a : Union[str, Any]="utf8" , __a : Union[str, Any]="[UNK]" , __a : Any="[SEP]" , __a : Tuple="[PAD]" , __a : Union[str, Any]="[CLS]" , __a : List[Any]="[MASK]" , __a : Optional[Dict[str, Any]] = None , **__a : Tuple , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , vocab_file=__a , encoding=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) _a = do_lower_case _a = sentencepiece_model_ckpt _a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__a ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: _a = self.load_vocab(filepath=__a ) else: _a = {self.sp_model.id_to_piece(__a ): id for id in range(self.sp_model.get_piece_size() )} _a = {v: k for k, v in self.vocab.items()} def UpperCamelCase__ ( self : List[Any] , __a : Any ): if text is None: return None _a = self.tokenize(__a ) _a , _a = "", [] for i, ch in enumerate(__a ): if ch in self.SP_CHAR_MAPPING: _a = self.SP_CHAR_MAPPING.get(__a ) else: _a = unicodedata.normalize("NFKC" , __a ) if self.is_whitespace(__a ): continue normalized_text += ch char_mapping.extend([i] * len(__a ) ) _a , _a , _a = normalized_text, [], 0 if self.do_lower_case: _a = text.lower() for token in split_tokens: if token[:1] == "▁": _a = token[1:] _a = text[offset:].index(__a ) + offset _a = start + len(__a ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) _a = end return token_mapping @property def UpperCamelCase__ ( self : int ): return len(self.vocab ) def UpperCamelCase__ ( self : Dict ): return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : Optional[int] ): _a = self.__dict__.copy() _a = None return state def __setstate__( self : List[Any] , __a : Any ): _a = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _a = {} _a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase__ ( self : Optional[Any] , __a : Optional[int] ): return "".join((self.SP_CHAR_MAPPING.get(__a , __a ) for c in text) ) def UpperCamelCase__ ( self : Optional[Any] , __a : List[Any] , __a : List[str]=False , __a : Tuple=64 , __a : Union[str, Any]=0.1 ): if self.sp_model_kwargs.get("enable_sampling" ) is True: _a = True if self.sp_model_kwargs.get("alpha" ) is not None: _a = self.sp_model_kwargs.get("alpha" ) if self.sp_model_kwargs.get("nbest_size" ) is not None: _a = self.sp_model_kwargs.get("nbest_size" ) if not enable_sampling: _a = self.sp_model.EncodeAsPieces(__a ) else: _a = self.sp_model.SampleEncodeAsPieces(__a , __a , __a ) _a = [] for pi, piece in enumerate(__a ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__a ) and pi != 0: new_pieces.append(__a ) continue else: continue _a = 0 for i, chunk in enumerate(__a ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__a ) or self.is_punct(__a ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__a ) _a = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _a = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _a = i if len(__a ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase__ ( self : Tuple , __a : Optional[int] ): _a = "".join(__a ).replace(__a , " " ).strip() return out_string def UpperCamelCase__ ( self : Any , __a : Dict ): _a = self.convert_ids_to_tokens(__a ) _a = "".join(__a 
).replace(SPIECE_UNDERLINE , " " ).strip() return out_string def UpperCamelCase__ ( self : str , __a : Optional[Any] ): return self.vocab.get(__a , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self : Optional[Any] , __a : Any ): return self.reverse_vocab.get(__a , self.unk_token ) def UpperCamelCase__ ( self : int , __a : int , __a : Optional[int]=None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a = [self.cls_token_id] _a = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase__ ( self : Optional[int] , __a : Any , __a : Optional[int]=None ): if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCamelCase__ ( self : Optional[Any] , __a : Optional[Any] , __a : Any=None , __a : Optional[Any]=False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1] def UpperCamelCase__ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ): # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(__a ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__a ) + 1) + [1] * (len(__a ) + 3) def UpperCamelCase__ ( self : Any , __a : Tuple ): if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase__ ( self : int , __a : int ): if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase__ ( self : List[str] , __a : List[Any] ): if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase__ ( self : Any , __a : Optional[Any] ): if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__a ) == 1: _a = unicodedata.category(__a ) if cat == "Zs": return True return False def UpperCamelCase__ ( self : List[Any] , __a : int ): _a = {} with io.open(__a , "r" , encoding="utf-8" ) as f: for index, line in enumerate(__a ): _a = line.rstrip("\n" ) _a = int(__a ) return token_to_idx def UpperCamelCase__ ( self : Tuple , __a : str , __a : Optional[str] = None ): _a = 0 if os.path.isdir(__a ): _a = os.path.join( __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: _a = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(__a , "w" , encoding="utf-8" ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' " Please check that the vocabulary is not corrupted!" ) _a = token_index writer.write(token + "\n" ) index += 1 _a = os.path.join(__a , "sentencepiece.bpe.model" ) with open(__a , "wb" ) as fi: _a = self.sp_model.serialized_model_proto() fi.write(__a ) return (vocab_file,)
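A minimal usage sketch for the tokenizer file above. It assumes the class is exported under its upstream name `ErnieMTokenizer` and that the `susnato/ernie-m-base_pytorch` checkpoint named in the URL maps above resolves; neither is verified here.

from transformers import ErnieMTokenizer  # assumed export name for the class above

tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")  # checkpoint from the vocab map above
encoded = tokenizer("ERNIE-M handles many languages.")
print(encoded["input_ids"])  # token ids with [CLS]/[SEP] added by build_inputs_with_special_tokens
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))  # SentencePiece pieces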
63
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='OwlViTImageProcessor' __a =('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )): _a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )] elif isinstance(__a , __a ) and isinstance(text[0] , __a ): _a = [] # Maximum number of queries across batch _a = max([len(__a ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__a ) != max_num_queries: _a = t + [" "] * (max_num_queries - len(__a )) _a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a ) encodings.append(__a ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": _a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) _a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) _a = BatchEncoding() _a = input_ids _a = attention_mask if query_images is not None: _a = BatchEncoding() _a = self.image_processor( __a , return_tensors=__a , **__a ).pixel_values _a = query_pixel_values if images is not None: _a = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None and images is not None: _a = image_features.pixel_values return encoding elif query_images is not None and images is not None: _a = image_features.pixel_values return encoding elif text is not None or query_images is not None: return 
encoding else: return BatchEncoding(data=dict(**__a ) , tensor_type=__a ) def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ): return self.image_processor.post_process(*__a , **__a ) def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ): return self.image_processor.post_process_object_detection(*__a , **__a ) def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ): return self.image_processor.post_process_image_guided_detection(*__a , **__a ) def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ): return self.tokenizer.batch_decode(*__a , **__a ) def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ): return self.tokenizer.decode(*__a , **__a ) @property def UpperCamelCase__ ( self : List[str] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def UpperCamelCase__ ( self : str ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
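A short, hedged sketch of how the processor above is typically called. The `google/owlvit-base-patch32` checkpoint and the local image path are assumptions (neither appears in this file); the nested-list text format mirrors the per-image query batching in `__call__` above.

from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")  # assumed checkpoint
image = Image.open("cat.png").convert("RGB")  # placeholder local image
# One list of text queries per image; shorter lists are padded to the longest one.
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)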
63
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =LDMTextToImagePipeline __a =TEXT_TO_IMAGE_PARAMS - { 'negative_prompt', 'negative_prompt_embeds', 'cross_attention_kwargs', 'prompt_embeds', } __a =PipelineTesterMixin.required_optional_params - { 'num_images_per_prompt', 'callback', 'callback_steps', } __a =TEXT_TO_IMAGE_BATCH_PARAMS __a =False def UpperCamelCase__ ( self : List[str] ): torch.manual_seed(0 ) _a = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _a = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) _a = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , ) torch.manual_seed(0 ) _a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _a = CLIPTextModel(__a ) _a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _a = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def UpperCamelCase__ ( self : Tuple , __a : Any , __a : Tuple=0 ): if str(__a ).startswith("mps" ): _a = torch.manual_seed(__a ) else: _a = torch.Generator(device=__a ).manual_seed(__a ) _a = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase__ ( self : int ): _a = "cpu" # ensure determinism for the device-dependent torch.Generator _a = self.get_dummy_components() _a = LDMTextToImagePipeline(**__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) _a = self.get_dummy_inputs(__a ) _a = pipe(**__a ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) _a = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Union[str, Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : Union[str, Any]=torch.floataa , __a : str=0 ): _a = torch.manual_seed(__a ) _a = np.random.RandomState(__a ).standard_normal((1, 4, 32, 32) ) _a = torch.from_numpy(__a ).to(device=__a , dtype=__a ) _a = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, 
"num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase__ ( self : Union[str, Any] ): _a = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__a ) pipe.set_progress_bar_config(disable=__a ) _a = self.get_inputs(__a ) _a = pipe(**__a ).images _a = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 2_56, 2_56, 3) _a = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] ) _a = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : List[str] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self : Any , __a : Optional[int] , __a : int=torch.floataa , __a : List[str]=0 ): _a = torch.manual_seed(__a ) _a = np.random.RandomState(__a ).standard_normal((1, 4, 32, 32) ) _a = torch.from_numpy(__a ).to(device=__a , dtype=__a ) _a = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase__ ( self : Optional[Any] ): _a = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__a ) pipe.set_progress_bar_config(disable=__a ) _a = self.get_inputs(__a ) _a = pipe(**__a ).images[0] _a = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) _a = np.abs(expected_image - image ).max() assert max_diff < 1e-3
63
'''simple docstring''' def harmonic_series ( n_term : str ) -> list: if n_term == "": return [] series = [] for temp in range(int(n_term ) ): series.append(F'1/{temp + 1}' if series else "1" ) return series if __name__ == "__main__": nth_term = input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
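Two quick checks for the fixed function above:

print(harmonic_series("5"))  # ['1', '1/2', '1/3', '1/4', '1/5']
print(harmonic_series(""))   # []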
63
1
'''simple docstring''' import os def solution ( ) -> int: with open(os.path.dirname(__file__ ) + "/grid.txt" ) as f: l = [] # noqa: E741 for _ in range(20 ): l.append([int(x ) for x in f.readline().split()] ) maximum = 0 # right for i in range(20 ): for j in range(17 ): temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: maximum = temp # down for i in range(17 ): for j in range(20 ): temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: maximum = temp # diagonal 1 for i in range(17 ): for j in range(17 ): temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: maximum = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: maximum = temp return maximum if __name__ == "__main__": print(solution())
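The same sliding four-term product, shown on a small hand-made grid so the scan logic is easy to verify (grid values are illustrative):

grid = [
    [1, 2, 3, 4, 5],
    [5, 6, 7, 8, 9],
]
best = 0
for row in grid:
    for j in range(len(row) - 3):  # every horizontal window of length 4
        best = max(best, row[j] * row[j + 1] * row[j + 2] * row[j + 3])
print(best)  # 6 * 7 * 8 * 9 = 3024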
63
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) lowerCAmelCase_ : List[str] = logging.getLogger(__name__) lowerCAmelCase_ : List[Any] = tf.data.AUTOTUNE def _lowerCamelCase ( ) -> Optional[int]: _a = argparse.ArgumentParser(description="Train a masked language model on TPU." ) parser.add_argument( "--pretrained_model_config" , type=lowercase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , ) parser.add_argument( "--tokenizer" , type=lowercase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , ) parser.add_argument( "--per_replica_batch_size" , type=lowercase , default=8 , help="Batch size per TPU core." , ) parser.add_argument( "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , ) parser.add_argument( "--tpu_name" , type=lowercase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , ) parser.add_argument( "--tpu_zone" , type=lowercase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , ) parser.add_argument( "--gcp_project" , type=lowercase , help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , ) parser.add_argument( "--train_dataset" , type=lowercase , help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--shuffle_buffer_size" , type=lowercase , default=2**18 , help="Size of the shuffle buffer (in samples)" , ) parser.add_argument( "--eval_dataset" , type=lowercase , help="Path to evaluation dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--num_epochs" , type=lowercase , default=1 , help="Number of epochs to train for." , ) parser.add_argument( "--learning_rate" , type=lowercase , default=1E-4 , help="Learning rate to use for training." , ) parser.add_argument( "--weight_decay_rate" , type=lowercase , default=1E-3 , help="Weight decay rate to use for training." , ) parser.add_argument( "--max_length" , type=lowercase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , ) parser.add_argument( "--mlm_probability" , type=lowercase , default=0.15 , help="Fraction of tokens to mask during training." , ) parser.add_argument("--output_dir" , type=lowercase , required=lowercase , help="Path to save model checkpoints to." ) parser.add_argument("--hub_model_id" , type=lowercase , help="Model ID to upload to on the Hugging Face Hub." 
) _a = parser.parse_args() return args def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Optional[int]: try: if args.tpu_name: _a = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: _a = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." ) tf.config.experimental_connect_to_cluster(lowercase ) tf.tpu.experimental.initialize_tpu_system(lowercase ) return tpu def _lowerCamelCase ( lowercase : List[str] ) -> Any: _a = 0 for file in file_list: _a = file.split("/" )[-1] _a = re.search(r"-\d+-(\d+)\.tfrecord" , lowercase ).group(1 ) _a = int(lowercase ) num_samples += sample_count return num_samples def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Tuple , lowercase : List[str] , lowercase : Any , lowercase : Tuple , lowercase : Optional[int]=None ) -> int: _a = count_samples(lowercase ) _a = tf.data.Dataset.from_tensor_slices(lowercase ) if shuffle: _a = dataset.shuffle(len(lowercase ) ) _a = tf.data.TFRecordDataset(lowercase , num_parallel_reads=lowercase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here _a = dataset.apply(tf.data.experimental.assert_cardinality(lowercase ) ) _a = dataset.map(lowercase , num_parallel_calls=lowercase ) if shuffle: assert shuffle_buffer_size is not None _a = dataset.shuffle(args.shuffle_buffer_size ) _a = dataset.batch(lowercase , drop_remainder=lowercase ) _a = dataset.map(lowercase , num_parallel_calls=lowercase ) _a = dataset.prefetch(lowercase ) return dataset def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict: if not args.no_tpu: _a = initialize_tpu(lowercase ) _a = tf.distribute.TPUStrategy(lowercase ) else: _a = tf.distribute.OneDeviceStrategy(device="/gpu:0" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" ) _a = AutoTokenizer.from_pretrained(args.tokenizer ) _a = AutoConfig.from_pretrained(args.pretrained_model_config ) _a = tokenizer.vocab_size _a = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) ) if not training_records: raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' ) _a = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) ) if not eval_records: raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' ) _a = count_samples(lowercase ) _a = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) _a = steps_per_epoch * args.num_epochs with strategy.scope(): _a = TFAutoModelForMaskedLM.from_config(lowercase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built _a , _a = create_optimizer( num_train_steps=lowercase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). 
model.compile(optimizer=lowercase , metrics=["accuracy"] ) def decode_fn(lowercase : int ): _a = { "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(lowercase , lowercase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. _a = DataCollatorForLanguageModeling( tokenizer=lowercase , mlm_probability=args.mlm_probability , mlm=lowercase , return_tensors="tf" ) def mask_with_collator(lowercase : List[Any] ): # TF really needs an isin() function _a = ( ~tf.cast(batch["attention_mask"] , tf.bool ) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) _a , _a = data_collator.tf_mask_tokens( batch["input_ids"] , vocab_size=len(lowercase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase , ) return batch _a = args.per_replica_batch_size * strategy.num_replicas_in_sync _a = prepare_dataset( lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , shuffle_buffer_size=args.shuffle_buffer_size , ) _a = prepare_dataset( lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , ) _a = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase ) ) model.fit( lowercase , validation_data=lowercase , epochs=args.num_epochs , callbacks=lowercase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": lowerCAmelCase_ : Any = parse_args() main(args)
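An example invocation of the training script above, built only from the flags its argument parser defines; the script filename and bucket paths are placeholders.

# python run_mlm_tpu.py \
#     --pretrained_model_config roberta-base \
#     --tokenizer unigram-tokenizer-wikitext \
#     --train_dataset gs://my-bucket/train/ \
#     --eval_dataset gs://my-bucket/validation/ \
#     --output_dir gs://my-bucket/model/ \
#     --num_epochs 1 --bfloat16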
63
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Dict , __a : int , __a : int=13 , __a : Dict=30 , __a : List[str]=2 , __a : Union[str, Any]=3 , __a : Optional[Any]=True , __a : Optional[Any]=True , __a : Tuple=32 , __a : List[Any]=2 , __a : int=4 , __a : Optional[Any]=37 , __a : Tuple="gelu" , __a : Dict=0.1 , __a : List[str]=0.1 , __a : int=10 , __a : int=0.02 , __a : Tuple=3 , __a : List[str]=None , ): _a = parent _a = batch_size _a = image_size _a = patch_size _a = num_channels _a = is_training _a = use_labels _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = type_sequence_label_size _a = initializer_range _a = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _a = (image_size // patch_size) ** 2 _a = num_patches + 1 def UpperCamelCase__ ( self : int ): _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self : Dict ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , ) def UpperCamelCase__ ( self : List[str] , __a : int , __a : Tuple , __a : Optional[Any] ): _a = TFViTModel(config=__a ) _a = model(__a , training=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. _a = self.image_size // 2 _a = pixel_values[:, :, :image_size, :image_size] _a = model(__a , interpolate_pos_encoding=__a , training=__a ) _a = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def UpperCamelCase__ ( self : Optional[int] , __a : List[Any] , __a : Optional[int] , __a : List[str] ): _a = self.type_sequence_label_size _a = TFViTForImageClassification(__a ) _a = model(__a , labels=__a , training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
_a = self.image_size // 2 _a = pixel_values[:, :, :image_size, :image_size] _a = model(__a , interpolate_pos_encoding=__a , training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _a = 1 _a = TFViTForImageClassification(__a ) _a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase__ ( self : Tuple ): _a = self.prepare_config_and_inputs() _a , _a , _a = config_and_inputs _a = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =(TFViTModel, TFViTForImageClassification) if is_tf_available() else () __a =( {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification} if is_tf_available() else {} ) __a =False __a =False __a =False def UpperCamelCase__ ( self : List[str] ): _a = TFViTModelTester(self ) _a = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def UpperCamelCase__ ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def UpperCamelCase__ ( self : Union[str, Any] ): pass @unittest.skip(reason="ViT does not use inputs_embeds" ) def UpperCamelCase__ ( self : str ): pass def UpperCamelCase__ ( self : Union[str, Any] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , tf.keras.layers.Layer ) ) def UpperCamelCase__ ( self : Union[str, Any] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__a ) _a = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def UpperCamelCase__ ( self : Dict ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def UpperCamelCase__ ( self : Optional[int] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def UpperCamelCase__ ( self : Union[str, Any] ): _a = TFViTModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(__a ) def _lowerCamelCase ( ) -> int: _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase__ ( self : Union[str, Any] ): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def UpperCamelCase__ ( self : Union[str, Any] ): _a = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=__a , return_tensors="tf" ) # forward pass _a = model(**__a ) # verify the logits _a = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __a ) _a = tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] 
, __a , atol=1e-4 )
63
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='LayoutLMv3ImageProcessor' __a =('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__( self : Tuple , __a : int=None , __a : Union[str, Any]=None , **__a : Optional[Any] ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__( self : Any , __a : List[str] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Dict , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) # first, apply the image processor _a = self.image_processor(images=__a , return_tensors=__a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__a , __a ): _a = [text] # add batch dimension (as the image processor always adds a batch dimension) _a = features["words"] _a = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , ) # add pixel values _a = features.pop("pixel_values" ) if return_overflowing_tokens is True: _a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] ) _a = images return encoded_inputs def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image _a = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__a ) != len(__a ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f' {len(__a )} and {len(__a )}' ) return images_with_overflow def UpperCamelCase__ ( self : int , *__a : str , **__a : Tuple ): return self.tokenizer.batch_decode(*__a , **__a ) def UpperCamelCase__ ( self : str , *__a : List[Any] , **__a : List[str] ): return self.tokenizer.decode(*__a , **__a ) @property def UpperCamelCase__ ( self : Tuple ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def UpperCamelCase__ ( self : int ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def UpperCamelCase__ ( self : List[str] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
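A hedged usage sketch for the processor above. `microsoft/layoutlmv3-base` and the image path are assumptions; the returned keys match the `model_input_names` property defined in this file, and OCR (the `apply_ocr` default) typically requires `pytesseract` to be installed.

from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")  # assumed checkpoint
image = Image.open("invoice.png").convert("RGB")  # placeholder document image
encoding = processor(image, return_tensors="pt")  # image processor runs OCR by default
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']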
63
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : List[Any] , *__a : Optional[Any] , **__a : Tuple ): warnings.warn( "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use DeformableDetrImageProcessor instead." , __a , ) super().__init__(*__a , **__a )
63
'''simple docstring''' from ....utils import logging lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Tuple , config , num_labels=None , modal_hidden_size=20_48 ): self.__dict__ = config.__dict__ self.modal_hidden_size = modal_hidden_size if num_labels: self.num_labels = num_labels
63
1
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ : int = logging.get_logger(__name__) lowerCAmelCase_ : List[str] = ['model.decoder.embed_positions.weights'] def _lowerCamelCase ( lowercase : Optional[int] ) -> int: if "emb" in name: _a = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: _a = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: _a = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: _a = name.replace("linear1" , "fc1" ) if "linear2" in name: _a = name.replace("linear2" , "fc2" ) if "norm1" in name: _a = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: _a = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: _a = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: _a = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: _a = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: _a = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def _lowerCamelCase ( lowercase : OrderedDict , lowercase : int ) -> Tuple[Dict, Dict]: _a = list(state_dict.keys() ) _a = {} for key in keys: _a = state_dict.pop(lowercase ) _a = rename_keys(lowercase ) if "in_proj_weight" in key: # split fused qkv proj _a = val[:hidden_size, :] _a = val[hidden_size : 2 * hidden_size, :] _a = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _a = val else: _a = val return state_dict, enc_dec_proj_state_dict def _lowerCamelCase ( lowercase : str ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values _a = 1024 _a = 24 _a = 16 elif checkpoint == "medium": _a = 1536 _a = 48 _a = 24 elif checkpoint == "large": _a = 2048 _a = 48 _a = 32 else: raise ValueError(F'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) _a = MusicgenDecoderConfig( hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , ) return config @torch.no_grad() def _lowerCamelCase ( lowercase : List[str] , lowercase : int=None , lowercase : Any=None , lowercase : Dict="cpu" ) -> Any: _a = MusicGen.get_pretrained(lowercase , device=lowercase ) _a = decoder_config_from_checkpoint(lowercase ) _a = fairseq_model.lm.state_dict() _a , _a = rename_state_dict( lowercase , hidden_size=decoder_config.hidden_size ) _a = TaEncoderModel.from_pretrained("t5-base" ) _a = EncodecModel.from_pretrained("facebook/encodec_32khz" ) _a = MusicgenForCausalLM(lowercase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _a , _a = decoder.load_state_dict(lowercase , strict=lowercase ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowercase ) if len(lowercase ) > 0: raise ValueError(F'Missing key(s) in state_dict: {missing_keys}' ) if len(lowercase ) > 0: raise ValueError(F'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model _a = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowercase ) # check we can do a forward pass _a = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _a = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _a = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor _a = AutoTokenizer.from_pretrained("t5-base" ) _a = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) _a = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase ) # set the appropriate bos/pad token ids _a = 2048 _a = 2048 # set other default generation config params _a = int(30 * audio_encoder.config.frame_rate ) _a = True _a = 3.0 if pytorch_dump_folder is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) logger.info(F'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(lowercase ) processor.save_pretrained(lowercase ) if repo_id: logger.info(F'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(lowercase ) processor.push_to_hub(lowercase ) if __name__ == "__main__": lowerCAmelCase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint', default='small', type=str, help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.', ) parser.add_argument( '--pytorch_dump_folder', required=True, default=None, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) parser.add_argument( '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.' ) lowerCAmelCase_ : str = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
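An example invocation of the conversion script above, using the flags its argument parser defines (the script filename is a placeholder):

# python convert_musicgen_checkpoint.py \
#     --checkpoint small \
#     --pytorch_dump_folder ./musicgen-small \
#     --device cpu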
63
'''simple docstring''' def solution ( n : int = 100 ) -> int: sum_of_squares = 0 sum_of_ints = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(f"""{solution() = }""")
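A quick worked check: for n = 10, the sum 1 + ... + 10 is 55 and the sum of squares is 385, so the difference is 55**2 - 385 = 2640.

assert solution(10) == 55**2 - 385 == 2640
print(solution())  # 25164150 for the default n = 100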
63
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase_ : Tuple = { 'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'], 'tokenization_xlm': ['XLMTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : List[str] = [ 'XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Tuple = [ 'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys lowerCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
63
'''simple docstring''' def is_palindrome ( num : int ) -> bool: if num < 0: return False num_copy = num rev_num = 0 while num > 0: rev_num = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
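A few quick checks for the fixed function:

print(is_palindrome(121))   # True
print(is_palindrome(-121))  # False — negatives are rejected up front
print(is_palindrome(10))    # False — the reversed value is 1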
63
1
'''simple docstring''' import functools def mincost_tickets ( days : list[int] , costs : list[int] ) -> int: # Validation if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ): raise ValueError("The parameter days should be a list of integers" ) if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ): raise ValueError("The parameter costs should be a list of three integers" ) if len(days ) == 0: return 0 if min(days ) <= 0: raise ValueError("All days elements should be greater than 0" ) if max(days ) >= 366: raise ValueError("All days elements should be less than 366" ) days_set = set(days ) @functools.cache def dynamic_programming(index : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
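Usage example with the classic test case: travel days [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15] (1-day, 7-day, 30-day) cost at least 11 — a 1-day pass for day 1, a 7-day pass covering days 4 through 8, and a 1-day pass for day 20.

print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11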
63
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase_ : int = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Optional[int] = ['GPTNeoXTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : List[str] = [ 'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoXForCausalLM', 'GPTNeoXForQuestionAnswering', 'GPTNeoXForSequenceClassification', 'GPTNeoXForTokenClassification', 'GPTNeoXLayer', 'GPTNeoXModel', 'GPTNeoXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
63
1
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Union[str, Any] ): _a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _a = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _a = -1 _a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _a = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _a = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _a = TextStreamer(__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _a = cs.out[:-1] self.assertEqual(__a , __a ) def UpperCamelCase__ ( self : Optional[int] ): _a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _a = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _a = -1 _a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _a = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _a = tokenizer.decode(greedy_ids[0] ) _a = TextIteratorStreamer(__a ) _a = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _a = Thread(target=model.generate , kwargs=__a ) thread.start() _a = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__a , __a ) def UpperCamelCase__ ( self : str ): _a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _a = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _a = -1 _a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _a = model.generate(__a , max_new_tokens=10 , do_sample=__a ) _a = greedy_ids[:, input_ids.shape[1] :] _a = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _a = TextStreamer(__a , skip_prompt=__a ) model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _a = cs.out[:-1] self.assertEqual(__a , __a ) def UpperCamelCase__ ( self : int ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _a = AutoTokenizer.from_pretrained("distilgpt2" ) _a = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a ) _a = -1 _a = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id with CaptureStdout() as cs: _a = TextStreamer(__a , skip_special_tokens=__a ) model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a ) # The prompt contains a special token, so the streamer should not print it. 
# As such, the output text, when re-tokenized, must only contain one token _a = cs.out[:-1] # Remove the final "\n" _a = tokenizer(__a , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def UpperCamelCase__ ( self : Any ): _a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _a = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a ) _a = -1 _a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a ) _a = TextIteratorStreamer(__a , timeout=0.001 ) _a = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _a = Thread(target=model.generate , kwargs=__a ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__a ): _a = "" for new_text in streamer: streamer_text += new_text
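A standalone sketch of the streaming pattern these tests exercise: `TextIteratorStreamer` is fed from a background generation thread and consumed as an iterator. The checkpoint is the same tiny test model used above.

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello world", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
thread = Thread(target=model.generate,
                kwargs={**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer})
thread.start()
for new_text in streamer:  # yields decoded text chunks as they are generated
    print(new_text, end="")
thread.join()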
63
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase_ : Any = get_tests_dir('fixtures') lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json') class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] ): _a = 0 def UpperCamelCase__ ( self : str ): _a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: _a = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _a = AutoFeatureExtractor.from_pretrained(__a ).to_dict() config_dict.pop("feature_extractor_type" ) _a = WavaVecaFeatureExtractor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) # make sure private variable is not incorrectly saved _a = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Union[str, Any] ): with self.assertRaisesRegex( __a , "bert-base is not a local folder and is not a valid model identifier" ): _a = AutoFeatureExtractor.from_pretrained("bert-base" ) def UpperCamelCase__ ( self : Optional[Any] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" ) def UpperCamelCase__ ( self : List[Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): _a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def UpperCamelCase__ ( self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def UpperCamelCase__ ( self : Any ): try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoFeatureExtractor.register(__a , __a ) # Now that the config is registered, it can be used as any other config with the auto-API _a = CustomFeatureExtractor.from_pretrained(__a ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__ ( self : Tuple ): class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =True try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # If remote code is not set, the default is to use local _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
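For reference, the basic loading path these tests exercise, with the same checkpoint the first test uses:

from transformers import AutoFeatureExtractor

# Resolves the extractor class from the checkpoint's preprocessor_config.json.
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(feature_extractor).__name__)  # Wav2Vec2FeatureExtractor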
63
1
'''simple docstring''' import os from distutils.util import strtobool def get_int_from_env ( env_keys , default ): for e in env_keys: val = int(os.environ.get(e , -1 ) ) if val >= 0: return val return default def parse_flag_from_env ( key , default=False ): value = os.environ.get(key , str(default ) ) return strtobool(value ) == 1 # As its name indicates `strtobool` actually returns an int... def parse_choice_from_env ( key , default="no" ): value = os.environ.get(key , str(default ) ) return value
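A quick demonstration of the three helpers; the restored function names above are inferred from their bodies and are an assumption, not verified against the upstream library.

import os

os.environ["MY_FLAG"] = "true"
os.environ["MY_PORT"] = "8080"
print(parse_flag_from_env("MY_FLAG"))                  # True
print(get_int_from_env(["MISSING", "MY_PORT"], 0))     # 8080 — first key with a value >= 0 wins
print(parse_choice_from_env("MY_MODE", default="no"))  # 'no' when MY_MODE is unset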
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
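# A quick sanity-check sketch for the config above; it relies only on the
# `attribute_map` declared in this file (values below are illustrative):
config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
assert config.num_hidden_layers == 2  # resolved through attribute_map -> n_layer
assert config.hidden_size == 128      # resolved through attribute_map -> n_embd
assert config.multi_query is True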
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
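# Rough illustration of the `_LazyModule` pattern above: the submodule is not
# imported when the package is imported, only on first attribute access.
# Treat this as a sketch of the intended behaviour, not a test of it:
import transformers.models.byt5 as byt5_module

tokenizer_cls = byt5_module.ByT5Tokenizer  # the lazy import happens here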
'''simple docstring'''


def perfect_cube(n: int) -> bool:
    # Round the float cube root before comparing: 27 ** (1 / 3) evaluates to
    # 3.0000000000000004, so an exact floating-point comparison would wrongly
    # report that 27 is not a perfect cube.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
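# For very large integers the float cube root above can still lose precision;
# a minimal integer-only alternative (a sketch, not part of the original file):
def perfect_cube_exact(n: int) -> bool:
    if n < 0:
        return False
    lo, hi = 0, max(1, n)
    while lo <= hi:  # binary search for an integer cube root
        mid = (lo + hi) // 2
        cube = mid ** 3
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False


assert perfect_cube_exact(2 ** 99)  # (2 ** 33) ** 3
assert not perfect_cube_exact(2 ** 99 + 1)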
'''simple docstring'''
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter
    flattening = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
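# Usage sketch for the function above. The coordinates are illustrative, and
# the call assumes the sibling `haversine_distance` module from the original
# repository is importable:
# San Francisco (37.774856, -122.424227) -> New York (40.713019, -74.012647)
distance_m = lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)
print(f"{distance_m / 1000:.0f} km")  # on the order of 4.1e3 km for this pair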
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : Optional[int] = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='deta' __a ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) _a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(__a , __a ): _a = backbone_config.pop("model_type" ) _a = CONFIG_MAPPING[backbone_model_type] _a = config_class.from_dict(__a ) _a = backbone_config _a = num_queries _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = init_xavier_std _a = encoder_layerdrop _a = auxiliary_loss _a = position_embedding_type # deformable attributes _a = num_feature_levels _a = encoder_n_points _a = decoder_n_points _a = two_stage _a = two_stage_num_proposals _a = with_box_refine _a = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher _a = class_cost _a = bbox_cost _a = giou_cost # Loss coefficients _a = mask_loss_coefficient _a = dice_loss_coefficient _a = bbox_loss_coefficient _a = giou_loss_coefficient _a = eos_coefficient _a = focal_alpha super().__init__(is_encoder_decoder=__a , **__a ) @property def UpperCamelCase__ ( self : Optional[Any] ): return self.encoder_attention_heads @property def UpperCamelCase__ ( self : Dict ): return self.d_model def UpperCamelCase__ ( self : List[str] ): _a = copy.deepcopy(self.__dict__ ) _a = self.backbone_config.to_dict() _a = self.__class__.model_type return output
'''simple docstring'''
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    """simple docstring"""

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
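# Example invocation (a sketch; the script filename and the checkpoint/output
# paths below are hypothetical):
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa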
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
'''simple docstring'''
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase_ : Any = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : str = ['ViTFeatureExtractor'] lowerCAmelCase_ : Tuple = ['ViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : List[Any] = [ 'VIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTForImageClassification', 'ViTForMaskedImageModeling', 'ViTModel', 'ViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : List[Any] = [ 'TFViTForImageClassification', 'TFViTModel', 'TFViTPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Any = [ 'FlaxViTForImageClassification', 'FlaxViTModel', 'FlaxViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys lowerCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''


def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hexadecimal representation
    # and join everything together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
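# Round-trip sanity check for the helpers above (a sketch using the
# deobfuscated names in this file):
encoded = base16_encode(b"Hello World!")
assert encoded == "48656C6C6F20576F726C6421"
assert base16_decode(encoded) == b"Hello World!"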
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any: model.train() _a = model(lowercase ) _a = F.mse_loss(lowercase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase ) def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]: set_seed(42 ) _a = RegressionModel() _a = deepcopy(lowercase ) _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) model.to(accelerator.device ) if sched: _a = AdamW(params=model.parameters() , lr=1E-3 ) _a = AdamW(params=ddp_model.parameters() , lr=1E-3 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) # Make a copy of `model` if sched: _a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase ) else: _a , _a = accelerator.prepare(lowercase , lowercase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]: # Test when on a single CPU or GPU that the context manager does nothing _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase , lowercase , lowercase , lowercase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad 
({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : Tuple ) -> Tuple: # Test on distributed setup that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] GradientState._reset_state() def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , 
gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' _a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase )) if accelerator.num_processes > 1: check_model_parameters(lowercase , lowercase , lowercase , lowercase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def _lowerCamelCase ( ) -> Any: _a = Accelerator() _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) _a = RegressionDataset(length=96 ) _a = DataLoader(lowercase , batch_size=16 ) _a , _a = accelerator.prepare(lowercase , lowercase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if iteration < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if batch_num < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _lowerCamelCase ( ) -> Optional[Any]: _a = Accelerator() _a = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(lowercase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(lowercase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(lowercase , lowercase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( 
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase ) def _lowerCamelCase ( lowercase : Any ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase_ : List[str] = { 'post_extract_proj': 'feature_projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.upsample.0': 'encoder.upsample.projection', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> List[Any]: for attribute in key.split("." ): _a = getattr(lowercase , lowercase ) if weight_type is not None: _a = getattr(lowercase , lowercase ).shape else: _a = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value else: _a = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[int] , lowercase : Dict ) -> Dict: _a = [] _a = fairseq_model.state_dict() _a = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _a = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == "group" , ) _a = True else: for key, mapped_key in MAPPING.items(): _a = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _a = True if "*" in mapped_key: _a = name.split(lowercase )[0].split("." )[-2] _a = mapped_key.replace("*" , lowercase ) if "weight_g" in name: _a = "weight_g" elif "weight_v" in name: _a = "weight_v" elif "weight" in name: _a = "weight" elif "bias" in name: _a = "bias" else: _a = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(F'Unused weights: {unused_weights}' ) def _lowerCamelCase ( lowercase : Any , lowercase : int , lowercase : List[str] , lowercase : Optional[int] , lowercase : List[str] ) -> Any: _a = full_name.split("conv_layers." )[-1] _a = name.split("." 
) _a = int(items[0] ) _a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _a = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _a = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) _a = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) _a = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(lowercase ) def _lowerCamelCase ( lowercase : str , lowercase : int ) -> List[str]: _a = SEWConfig() if is_finetuned: _a = model.wav_encoder.wav_model.cfg else: _a = model.cfg _a = fs_config.conv_bias _a = eval(fs_config.conv_feature_layers ) _a = [x[0] for x in conv_layers] _a = [x[1] for x in conv_layers] _a = [x[2] for x in conv_layers] _a = "gelu" _a = "layer" if fs_config.extractor_mode == "layer_norm" else "group" _a = 0.0 _a = fs_config.activation_fn.name _a = fs_config.encoder_embed_dim _a = 0.02 _a = fs_config.encoder_ffn_embed_dim _a = 1E-5 _a = fs_config.encoder_layerdrop _a = fs_config.encoder_attention_heads _a = fs_config.conv_pos_groups _a = fs_config.conv_pos _a = len(lowercase ) _a = fs_config.encoder_layers _a = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _a = model.cfg _a = fs_config.final_dropout _a = fs_config.layerdrop _a = fs_config.activation_dropout _a = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _a = fs_config.attention_dropout _a = fs_config.dropout_input _a = fs_config.dropout _a = fs_config.mask_channel_length _a = fs_config.mask_channel_prob _a = fs_config.mask_length _a = fs_config.mask_prob _a = "Wav2Vec2FeatureExtractor" _a = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : str=None , lowercase : Union[str, Any]=None , lowercase : Optional[int]=True ) -> str: if is_finetuned: _a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _a = SEWConfig.from_pretrained(lowercase ) else: _a = convert_config(model[0] , lowercase ) _a = model[0].eval() _a = True if config.feat_extract_norm == "layer" else False _a = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , 
padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) if is_finetuned: if dict_path: _a = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _a = target_dict.pad_index _a = target_dict.bos_index _a = target_dict.pad_index _a = target_dict.bos_index _a = target_dict.eos_index _a = len(target_dict.symbols ) _a = os.path.join(lowercase , "vocab.json" ) if not os.path.isdir(lowercase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) with open(lowercase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , lowercase ) _a = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowercase , ) _a = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) _a = SEWForCTC(lowercase ) else: _a = SEWModel(lowercase ) feature_extractor.save_pretrained(lowercase ) recursively_load_weights(lowercase , lowercase , lowercase ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": lowerCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) lowerCAmelCase_ : int = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ : Any = 16 lowerCAmelCase_ : Dict = 32 def _lowerCamelCase ( lowercase : Accelerator , lowercase : int = 16 ) -> Union[str, Any]: _a = AutoTokenizer.from_pretrained("bert-base-cased" ) _a = load_dataset("glue" , "mrpc" ) def tokenize_function(lowercase : List[str] ): # max_length=None => use the model max length (it's actually the default) _a = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _a = datasets.map( lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _a = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase : Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. _a = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _a = 16 elif accelerator.mixed_precision != "no": _a = 8 else: _a = None return tokenizer.pad( lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , ) # Instantiate dataloaders. 
_a = DataLoader( tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) _a = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase_ : Dict = mocked_dataloaders # noqa: F811 def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Tuple ) -> int: # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1": _a = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _a = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir ) else: _a = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _a = config["lr"] _a = int(config["num_epochs"] ) _a = int(config["seed"] ) _a = int(config["batch_size"] ) set_seed(lowercase ) _a , _a = get_dataloaders(lowercase , lowercase ) _a = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation _a = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _a = batch_size // MAX_GPU_BATCH_SIZE _a = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) _a = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _a = model.to(accelerator.device ) # Instantiate optimizer _a = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler _a = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _a , _a , _a , _a , _a = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _a = os.path.split(lowercase )[-1].split("." )[0] accelerator.init_trackers(lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _a = 0 for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _a = model(**lowercase ) _a = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _a = loss / gradient_accumulation_steps accelerator.backward(lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _a = model(**lowercase ) _a = outputs.logits.argmax(dim=-1 ) _a , _a = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase , references=lowercase , ) _a = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , lowercase ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(lowercase ), "epoch": epoch, } , step=lowercase , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def _lowerCamelCase ( ) -> Dict: _a = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , ) parser.add_argument( "--project_dir" , type=lowercase , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) _a = parser.parse_args() _a = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
'''simple docstring'''
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")

# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
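# Quick demonstration of the two patterns above on a synthetic snippet (the
# mapping line is made up for illustration):
sample = '        ("albert", "AlbertConfig"),'
assert _re_identifier.search(sample).groups()[0] == "albert"
assert _re_intro_mapping.search("CONFIG_MAPPING_NAMES = OrderedDict(") is not None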
63
1
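The sorter above keys every mapping block on the first quoted identifier a regex extracts from it. A toy, self-contained version of that idea (the sample blocks are invented):

import re

# Same regex as the script above; sort blocks by the captured identifier.
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
blocks = ['        ("bert", "BertModel"),', '        ("albert", "AlbertModel"),']
blocks = sorted(blocks, key=lambda b: _re_identifier.search(b).groups()[0])
print(blocks)  # the "albert" entry now comes first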
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='ViltImageProcessor' __a =('BertTokenizer', 'BertTokenizerFast') def __init__( self : str , __a : Any=None , __a : Tuple=None , **__a : Optional[int] ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) _a = self.image_processor def __call__( self : Any , __a : str , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[Any] , ): _a = self.tokenizer( text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , ) # add pixel_values + pixel_mask _a = self.image_processor(__a , return_tensors=__a ) encoding.update(__a ) return encoding def UpperCamelCase__ ( self : Dict , *__a : Union[str, Any] , **__a : Tuple ): return self.tokenizer.batch_decode(*__a , **__a ) def UpperCamelCase__ ( self : Optional[Any] , *__a : Optional[Any] , **__a : Optional[Any] ): return self.tokenizer.decode(*__a , **__a ) @property def UpperCamelCase__ ( self : Optional[int] ): _a = self.tokenizer.model_input_names _a = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase__ ( self : int ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def UpperCamelCase__ ( self : Union[str, Any] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
63
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ : int = logging.get_logger(__name__) lowerCAmelCase_ : Tuple = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='big_bird' def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ): super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , ) _a = vocab_size _a = max_position_embeddings _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = initializer_range _a = type_vocab_size _a = layer_norm_eps _a = use_cache _a = rescale_embeddings _a = attention_type _a = use_bias _a = block_size _a = num_random_blocks _a = classifier_dropout class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" @property def UpperCamelCase__ ( self : Optional[int] ): if self.task == "multiple-choice": _a = {0: "batch", 1: "choice", 2: "sequence"} else: _a = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
63
1
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger lowerCAmelCase_ : Optional[Any] = get_logger(__name__) lowerCAmelCase_ : Tuple = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n' class __SCREAMING_SNAKE_CASE : """simple docstring""" @add_start_docstrings(__a ) def __call__( self : Dict , __a : jnp.ndarray , __a : jnp.ndarray ): raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class __SCREAMING_SNAKE_CASE : """simple docstring""" @add_start_docstrings(__a ) def __call__( self : List[str] , __a : jnp.ndarray , __a : jnp.ndarray ): raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" @add_start_docstrings(__a ) def __call__( self : Tuple , __a : jnp.ndarray , __a : jnp.ndarray , __a : int , **__a : Union[str, Any] ): for processor in self: _a = inspect.signature(processor.__call__ ).parameters if len(__a ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'Make sure that all the required parameters: {list(function_args.keys() )} for ' f'{processor.__class__} are passed to the logits processor.' 
) _a = processor(__a , __a , __a , **__a ) else: _a = processor(__a , __a , __a ) return scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Dict , __a : float ): if not isinstance(__a , __a ) or not (temperature > 0): raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' ) _a = temperature def __call__( self : int , __a : jnp.ndarray , __a : jnp.ndarray , __a : int ): _a = scores / self.temperature return scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Optional[int] , __a : float , __a : float = -float("Inf" ) , __a : int = 1 ): if not isinstance(__a , __a ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' ) if not isinstance(__a , __a ) or (min_tokens_to_keep < 1): raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' ) _a = top_p _a = filter_value _a = min_tokens_to_keep def __call__( self : Dict , __a : jnp.ndarray , __a : jnp.ndarray , __a : int ): _a , _a = lax.top_k(__a , scores.shape[-1] ) _a = jnp.full_like(__a , self.filter_value ) _a = jax.nn.softmax(__a , axis=-1 ).cumsum(axis=-1 ) _a = cumulative_probs < self.top_p # include the token that is higher than top_p as well _a = jnp.roll(__a , 1 ) score_mask |= score_mask.at[:, 0].set(__a ) # min tokens to keep _a = score_mask.at[:, : self.min_tokens_to_keep].set(__a ) _a = jnp.where(__a , __a , __a ) _a = jax.lax.sort_key_val(__a , __a )[-1] return next_scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Optional[Any] , __a : int , __a : float = -float("Inf" ) , __a : int = 1 ): if not isinstance(__a , __a ) or top_k <= 0: raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' ) _a = max(__a , __a ) _a = filter_value def __call__( self : Dict , __a : jnp.ndarray , __a : jnp.ndarray , __a : int ): _a , _a = scores.shape _a = jnp.full(batch_size * vocab_size , self.filter_value ) _a = min(self.top_k , scores.shape[-1] ) # Safety check _a , _a = lax.top_k(__a , __a ) _a = jnp.broadcast_to((jnp.arange(__a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() _a = topk_scores.flatten() _a = topk_indices.flatten() + shift _a = next_scores_flat.at[topk_indices_flat].set(__a ) _a = next_scores_flat.reshape(__a , __a ) return next_scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , __a : int ): _a = bos_token_id def __call__( self : Optional[int] , __a : jnp.ndarray , __a : jnp.ndarray , __a : int ): _a = jnp.full(scores.shape , -float("inf" ) ) _a = 1 - jnp.bool_(cur_len - 1 ) _a = jnp.where(__a , new_scores.at[:, self.bos_token_id].set(0 ) , __a ) return scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Tuple , __a : int , __a : int ): _a = max_length _a = eos_token_id def __call__( self : List[Any] , __a : jnp.ndarray , __a : jnp.ndarray , __a : int ): _a = jnp.full(scores.shape , -float("inf" ) ) _a = 1 - jnp.bool_(cur_len - self.max_length + 1 ) _a = jnp.where(__a , new_scores.at[:, self.eos_token_id].set(0 ) , __a ) return scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : List[str] , __a : int , __a : int ): if not isinstance(__a , __a ) or min_length < 0: raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' ) if not 
isinstance(__a , __a ) or eos_token_id < 0: raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' ) _a = min_length _a = eos_token_id def __call__( self : Dict , __a : jnp.ndarray , __a : jnp.ndarray , __a : int ): # create boolean flag to decide if min length penalty should be applied _a = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) _a = jnp.where(__a , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __a ) return scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Any , __a : Any , __a : Union[str, Any] ): _a = list(__a ) _a = begin_index def __call__( self : int , __a : List[Any] , __a : List[Any] , __a : int ): _a = 1 - jnp.bool_(cur_len - self.begin_index ) _a = jnp.where(__a , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __a ) return scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , __a : list ): _a = list(__a ) def __call__( self : Tuple , __a : jnp.ndarray , __a : jnp.ndarray , __a : int ): _a = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : int , __a : int ): _a = dict(__a ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. _a = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: _a = force_token_array.at[index].set(__a ) _a = jnp.intaa(__a ) def __call__( self : List[Any] , __a : jnp.ndarray , __a : jnp.ndarray , __a : int ): def _force_token(__a : Dict ): _a = scores.shape[0] _a = self.force_token_array[generation_idx] _a = jnp.ones_like(__a , dtype=scores.dtype ) * -float("inf" ) _a = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) _a = lax.dynamic_update_slice(__a , __a , (0, current_token) ) return new_scores _a = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__a ) , lambda: scores , ) , ) return scores class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : str , __a : int , __a : Optional[int] , __a : List[str] ): _a = generate_config.eos_token_id _a = generate_config.no_timestamps_token_id _a = generate_config.no_timestamps_token_id + 1 _a = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__a , "max_initial_timestamp_index" ): _a = generate_config.max_initial_timestamp_index else: _a = model_config.vocab_size if self.max_initial_timestamp_index is None: _a = model_config.vocab_size def __call__( self : List[Any] , __a : Union[str, Any] , __a : int , __a : Tuple ): # suppress <|notimestamps|> which is handled by without_timestamps _a = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(__a : Tuple , __a : str ): _a = jnp.where((cur_len - self.begin_index) >= 1 , __a , __a ) _a = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __a , ) _a = jnp.where((cur_len - self.begin_index) < 2 , __a , __a ) _a = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __a , __a , ) return jnp.where( 
__a , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __a , ) _a = jax.vmap(__a )(__a , __a ) _a = jnp.where(cur_len == self.begin_index , __a , __a ) _a = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __a , ) _a = self.timestamp_begin + self.max_initial_timestamp_index _a = jnp.where( __a , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , __a , ) # if sum of probability over timestamps is above any other token, sample timestamp _a = jax.nn.log_softmax(__a , axis=-1 ) def handle_cumulative_probs(__a : List[Any] , __a : Tuple ): _a = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) _a = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __a , ) _a = jax.vmap(__a )(__a , __a ) return scores
63
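Of the processors above, the top-p (nucleus) warper is the least obvious: sort the scores, keep the smallest prefix whose cumulative probability stays under `top_p` (always retaining the single best token), and push everything else to -inf. A hedged numpy re-implementation of just that masking step, not the Flax original:

import numpy as np

def top_p_filter(scores: np.ndarray, top_p: float = 0.9) -> np.ndarray:
    order = np.argsort(scores)[::-1]          # tokens from best to worst
    probs = np.exp(scores - scores.max())
    probs /= probs.sum()                      # softmax over the raw scores
    cum = np.cumsum(probs[order])
    keep = np.roll(cum < top_p, 1)            # include the token that crosses top_p
    keep[0] = True                            # never drop the most likely token
    filtered = np.full_like(scores, -np.inf)
    filtered[order[keep]] = scores[order[keep]]
    return filtered

print(top_p_filter(np.array([2.0, 1.0, 0.5, -1.0])))  # last score becomes -inf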
'''simple docstring''' import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" @register_to_config def __init__( self : Dict , *, __a : int = 4 , __a : int = 7_68 , __a : int , __a : int , ): super().__init__() _a = nn.Parameter(torch.zeros(__a ) ) # parameters for additional clip time embeddings _a = nn.Linear(__a , __a ) _a = nn.Linear(__a , __a ) # parameters for encoder hidden states _a = clip_extra_context_tokens _a = nn.Linear( __a , self.clip_extra_context_tokens * cross_attention_dim ) _a = nn.Linear(__a , __a ) _a = nn.LayerNorm(__a ) def UpperCamelCase__ ( self : Optional[Any] , *, __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any] ): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings _a = image_embeddings.shape[0] _a = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) _a = classifier_free_guidance_embeddings.expand( __a , -1 ) _a = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] _a = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... _a = self.embedding_proj(__a ) _a = self.clip_image_embeddings_project_to_time_embeddings(__a ) _a = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" _a = self.clip_extra_context_tokens_proj(__a ) _a = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens ) _a = clip_extra_context_tokens.permute(0 , 2 , 1 ) _a = self.encoder_hidden_states_proj(__a ) _a = self.text_encoder_hidden_states_norm(__a ) _a = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
63
1
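The comments in the row above spell out the unCLIP trick: the CLIP image embedding is folded into the timestep embedding and also projected into a handful of extra cross-attention tokens. An illustrative shape check of the second half (all sizes invented, and the plain reshape below stands in for the module's reshape-then-permute):

import torch
from torch import nn

batch, embed_dim, cross_dim, n_tokens = 2, 768, 1280, 4
proj = nn.Linear(embed_dim, n_tokens * cross_dim)  # one vector -> n_tokens vectors
image_embeddings = torch.randn(batch, embed_dim)
tokens = proj(image_embeddings).reshape(batch, n_tokens, cross_dim)
print(tokens.shape)  # torch.Size([2, 4, 1280])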
'''simple docstring'''
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup  # the package name is bs4


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
63
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _lowerCamelCase ( lowercase : Dict ) -> Any: _a = filter(lambda lowercase : p.requires_grad , model.parameters() ) _a = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_ : int = logging.getLogger(__name__) def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any ) -> Any: if metric == "rouge2": _a = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": _a = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": _a = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' " function." ) _a = ModelCheckpoint( dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Optional[int] ) -> Union[str, Any]: return EarlyStopping( monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , ) class __SCREAMING_SNAKE_CASE (pl.Callback ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): _a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__a ) @rank_zero_only def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Tuple=True ): logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' ) _a = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results _a = Path(pl_module.hparams.output_dir ) if type_path == "test": _a = od / "test_results.txt" _a = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt' _a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=__a ) generations_file.parent.mkdir(exist_ok=__a ) with open(__a , "a+" ) as writer: for key in sorted(__a ): if key in ["log", "progress_bar", "preds"]: continue _a = metrics[key] if isinstance(__a , torch.Tensor ): _a = val.item() _a = f'{key}: {val:.6f}\n' writer.write(__a ) if not save_generations: return if "preds" in metrics: _a = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__a ) @rank_zero_only def UpperCamelCase__ ( self : int , __a : List[Any] , __a : Union[str, Any] ): try: _a = pl_module.model.model.num_parameters() except AttributeError: _a = pl_module.model.num_parameters() _a = count_trainable_parameters(__a ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} ) @rank_zero_only def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__a , __a , "test" ) @rank_zero_only def UpperCamelCase__ ( self : Any , __a : pl.Trainer , __a : int ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
63
1
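The callbacks above count trainable parameters by filtering on `requires_grad` and summing the product of each tensor's dimensions; a quick standalone check of that arithmetic:

import numpy as np
from torch import nn

model = nn.Linear(10, 3)
trainable = filter(lambda p: p.requires_grad, model.parameters())
print(sum(np.prod(p.size()) for p in trainable))  # 10*3 weights + 3 biases = 33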
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase_ : str = logging.get_logger(__name__) lowerCAmelCase_ : Any = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='codegen' __a ={ 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Dict , __a : Any=5_04_00 , __a : Union[str, Any]=20_48 , __a : List[Any]=20_48 , __a : Dict=40_96 , __a : List[Any]=28 , __a : List[str]=16 , __a : List[Any]=64 , __a : Any=None , __a : Tuple="gelu_new" , __a : List[str]=0.0 , __a : Tuple=0.0 , __a : Optional[int]=0.0 , __a : Tuple=1e-5 , __a : int=0.02 , __a : Union[str, Any]=True , __a : List[str]=5_02_56 , __a : int=5_02_56 , __a : Optional[int]=False , **__a : List[Any] , ): _a = vocab_size _a = n_ctx _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = rotary_dim _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = use_cache _a = bos_token_id _a = eos_token_id super().__init__( bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Optional[int] , __a : PretrainedConfig , __a : str = "default" , __a : List[PatchingSpec] = None , __a : bool = False , ): super().__init__(__a , task=__a , patching_specs=__a , use_past=__a ) if not getattr(self._config , "pad_token_id" , __a ): # TODO: how to do that better? 
_a = 0 @property def UpperCamelCase__ ( self : Union[str, Any] ): _a = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(__a , direction="inputs" ) _a = {0: "batch", 1: "past_sequence + sequence"} else: _a = {0: "batch", 1: "sequence"} return common_inputs @property def UpperCamelCase__ ( self : str ): return self._config.n_layer @property def UpperCamelCase__ ( self : int ): return self._config.n_head def UpperCamelCase__ ( self : List[Any] , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ): _a = super(__a , self ).generate_dummy_inputs( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a ) # We need to order the input in the way they appears in the forward() _a = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _a , _a = common_inputs["input_ids"].shape # Not using the same length for past_key_values _a = seqlen + 2 _a = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _a = [ (torch.zeros(__a ), torch.zeros(__a )) for _ in range(self.num_layers ) ] _a = common_inputs["attention_mask"] if self.use_past: _a = ordered_inputs["attention_mask"].dtype _a = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__a , __a , dtype=__a )] , dim=1 ) return ordered_inputs @property def UpperCamelCase__ ( self : int ): return 13
63
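For the ONNX export above, `generate_dummy_inputs` fabricates a zero-filled key/value cache per layer so the past-aware graph can be traced; a sketch of just that shape construction (all sizes invented):

import torch

batch, heads, past_len, head_dim, n_layer = 2, 16, 10, 64, 4
past_key_values = [
    (torch.zeros(batch, heads, past_len, head_dim),
     torch.zeros(batch, heads, past_len, head_dim))
    for _ in range(n_layer)
]
print(len(past_key_values), past_key_values[0][0].shape)  # 4 torch.Size([2, 16, 10, 64])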
'''simple docstring'''
import math


class SelfOrganizingMap:
    """Two-cluster self-organizing map with hand-rolled distances and updates."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index (0 or 1) chosen by the squared-distance comparison."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull winning weight vector `j` towards the sample at learning rate `alpha`."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
63
1
'''simple docstring'''


def get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small demo graphs by index."""
    return [
        {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]},
        {0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1]},
        {0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7]},
        {0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3]},
    ][index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Find all bridges with a single DFS using Tarjan's low-link technique."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
63
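A quick sanity check of `compute_bridges` as restored above (the function names are mine, picked for the rewrite): in a triangle with one pendant vertex, the pendant edge is the only bridge.

print(compute_bridges({0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}))  # [(2, 3)]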
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='OwlViTImageProcessor' __a =('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )): _a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )] elif isinstance(__a , __a ) and isinstance(text[0] , __a ): _a = [] # Maximum number of queries across batch _a = max([len(__a ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__a ) != max_num_queries: _a = t + [" "] * (max_num_queries - len(__a )) _a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a ) encodings.append(__a ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": _a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) _a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) _a = BatchEncoding() _a = input_ids _a = attention_mask if query_images is not None: _a = BatchEncoding() _a = self.image_processor( __a , return_tensors=__a , **__a ).pixel_values _a = query_pixel_values if images is not None: _a = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None and images is not None: _a = image_features.pixel_values return encoding elif query_images is not None and images is not None: _a = image_features.pixel_values return encoding elif text is not None or query_images is not None: return 
encoding else: return BatchEncoding(data=dict(**__a ) , tensor_type=__a ) def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ): return self.image_processor.post_process(*__a , **__a ) def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ): return self.image_processor.post_process_object_detection(*__a , **__a ) def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ): return self.image_processor.post_process_image_guided_detection(*__a , **__a ) def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ): return self.tokenizer.batch_decode(*__a , **__a ) def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ): return self.tokenizer.decode(*__a , **__a ) @property def UpperCamelCase__ ( self : List[str] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def UpperCamelCase__ ( self : str ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
63
1
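One step worth isolating from the OwlViT processor above: ragged per-image text queries are padded with " " to a shared count before tokenization. A standalone illustration (the queries are invented):

text = [["a cat"], ["a dog", "a remote"]]
max_num_queries = max(len(t) for t in text)
text = [t + [" "] * (max_num_queries - len(t)) for t in text]
print(text)  # [['a cat', ' '], ['a dog', 'a remote']]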
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes above 3 have the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
63
'''simple docstring'''


def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
63
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =None __a =None __a =None __a =None class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Tuple , __a : Tuple=1 , __a : List[Any]=0 , __a : Any=2 , __a : Any=5_12 , __a : Optional[Any]="cls" , __a : List[Any]=False , __a : Tuple=True , **__a : Tuple , ): super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _a = project_dim _a = pooler_fn _a = learn_encoder _a = use_attention_mask class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =[r'pooler', r'logit_scale'] __a =[r'position_ids', r'predictions.decoder.bias'] __a ='roberta' __a =RobertaSeriesConfig def __init__( self : Optional[int] , __a : List[Any] ): super().__init__(__a ) _a = XLMRobertaModel(__a ) _a = nn.Linear(config.hidden_size , config.project_dim ) _a = getattr(__a , "has_pre_transformation" , __a ) if self.has_pre_transformation: _a = nn.Linear(config.hidden_size , config.project_dim ) _a = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def UpperCamelCase__ ( self : Any , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ): _a = return_dict if return_dict is not None else self.config.use_return_dict _a = self.base_model( input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , ) if self.has_pre_transformation: _a = outputs["hidden_states"][-2] _a = self.pre_LN(__a ) _a = self.transformation_pre(__a ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: _a = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
63
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) lowerCAmelCase_ : List[str] = logging.getLogger(__name__) lowerCAmelCase_ : List[Any] = tf.data.AUTOTUNE def _lowerCamelCase ( ) -> Optional[int]: _a = argparse.ArgumentParser(description="Train a masked language model on TPU." ) parser.add_argument( "--pretrained_model_config" , type=lowercase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , ) parser.add_argument( "--tokenizer" , type=lowercase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , ) parser.add_argument( "--per_replica_batch_size" , type=lowercase , default=8 , help="Batch size per TPU core." , ) parser.add_argument( "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , ) parser.add_argument( "--tpu_name" , type=lowercase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , ) parser.add_argument( "--tpu_zone" , type=lowercase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , ) parser.add_argument( "--gcp_project" , type=lowercase , help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , ) parser.add_argument( "--train_dataset" , type=lowercase , help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--shuffle_buffer_size" , type=lowercase , default=2**18 , help="Size of the shuffle buffer (in samples)" , ) parser.add_argument( "--eval_dataset" , type=lowercase , help="Path to evaluation dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--num_epochs" , type=lowercase , default=1 , help="Number of epochs to train for." , ) parser.add_argument( "--learning_rate" , type=lowercase , default=1E-4 , help="Learning rate to use for training." , ) parser.add_argument( "--weight_decay_rate" , type=lowercase , default=1E-3 , help="Weight decay rate to use for training." , ) parser.add_argument( "--max_length" , type=lowercase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , ) parser.add_argument( "--mlm_probability" , type=lowercase , default=0.15 , help="Fraction of tokens to mask during training." , ) parser.add_argument("--output_dir" , type=lowercase , required=lowercase , help="Path to save model checkpoints to." ) parser.add_argument("--hub_model_id" , type=lowercase , help="Model ID to upload to on the Hugging Face Hub." 
) _a = parser.parse_args() return args def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Optional[int]: try: if args.tpu_name: _a = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: _a = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." ) tf.config.experimental_connect_to_cluster(lowercase ) tf.tpu.experimental.initialize_tpu_system(lowercase ) return tpu def _lowerCamelCase ( lowercase : List[str] ) -> Any: _a = 0 for file in file_list: _a = file.split("/" )[-1] _a = re.search(r"-\d+-(\d+)\.tfrecord" , lowercase ).group(1 ) _a = int(lowercase ) num_samples += sample_count return num_samples def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Tuple , lowercase : List[str] , lowercase : Any , lowercase : Tuple , lowercase : Optional[int]=None ) -> int: _a = count_samples(lowercase ) _a = tf.data.Dataset.from_tensor_slices(lowercase ) if shuffle: _a = dataset.shuffle(len(lowercase ) ) _a = tf.data.TFRecordDataset(lowercase , num_parallel_reads=lowercase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here _a = dataset.apply(tf.data.experimental.assert_cardinality(lowercase ) ) _a = dataset.map(lowercase , num_parallel_calls=lowercase ) if shuffle: assert shuffle_buffer_size is not None _a = dataset.shuffle(args.shuffle_buffer_size ) _a = dataset.batch(lowercase , drop_remainder=lowercase ) _a = dataset.map(lowercase , num_parallel_calls=lowercase ) _a = dataset.prefetch(lowercase ) return dataset def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict: if not args.no_tpu: _a = initialize_tpu(lowercase ) _a = tf.distribute.TPUStrategy(lowercase ) else: _a = tf.distribute.OneDeviceStrategy(device="/gpu:0" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" ) _a = AutoTokenizer.from_pretrained(args.tokenizer ) _a = AutoConfig.from_pretrained(args.pretrained_model_config ) _a = tokenizer.vocab_size _a = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) ) if not training_records: raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' ) _a = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) ) if not eval_records: raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' ) _a = count_samples(lowercase ) _a = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) _a = steps_per_epoch * args.num_epochs with strategy.scope(): _a = TFAutoModelForMaskedLM.from_config(lowercase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built _a , _a = create_optimizer( num_train_steps=lowercase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). 
model.compile(optimizer=lowercase , metrics=["accuracy"] ) def decode_fn(lowercase : int ): _a = { "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(lowercase , lowercase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. _a = DataCollatorForLanguageModeling( tokenizer=lowercase , mlm_probability=args.mlm_probability , mlm=lowercase , return_tensors="tf" ) def mask_with_collator(lowercase : List[Any] ): # TF really needs an isin() function _a = ( ~tf.cast(batch["attention_mask"] , tf.bool ) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) _a , _a = data_collator.tf_mask_tokens( batch["input_ids"] , vocab_size=len(lowercase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase , ) return batch _a = args.per_replica_batch_size * strategy.num_replicas_in_sync _a = prepare_dataset( lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , shuffle_buffer_size=args.shuffle_buffer_size , ) _a = prepare_dataset( lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , ) _a = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase ) ) model.fit( lowercase , validation_data=lowercase , epochs=args.num_epochs , callbacks=lowercase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": lowerCAmelCase_ : Any = parse_args() main(args)
63
1
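The TPU script above recovers each shard's sample count from its filename rather than reading the records; a standalone check of that regex (filenames invented to match the `-<shard>-<count>.tfrecord` pattern):

import re

files = ["train-00000-512.tfrecord", "train-00001-480.tfrecord"]
total = sum(int(re.search(r"-\d+-(\d+)\.tfrecord", f).group(1)) for f in files)
print(total)  # 992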
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" __a =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __a =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def UpperCamelCase__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict , __a : List[str] ): _a = TextaTextGenerationPipeline(model=__a , tokenizer=__a ) return generator, ["Something to write", "Something else"] def UpperCamelCase__ ( self : List[str] , __a : List[Any] , __a : Any ): _a = generator("Something there" ) self.assertEqual(__a , [{"generated_text": ANY(__a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) ) _a = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a ) self.assertEqual( __a , [ [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}], [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}], ] , ) _a = generator( ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a ) self.assertEqual( __a , [ [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}], [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}], ] , ) with self.assertRaises(__a ): generator(4 ) @require_torch def UpperCamelCase__ ( self : Optional[int] ): _a = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" ) # do_sample=False necessary for reproducibility _a = generator("Something there" , do_sample=__a ) self.assertEqual(__a , [{"generated_text": ""}] ) _a = 3 _a = generator( "Something there" , num_return_sequences=__a , num_beams=__a , ) _a = [ {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": ""}, ] self.assertEqual(__a , __a ) _a = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a ) self.assertEqual( __a , [ {"generated_token_ids": ANY(torch.Tensor )}, {"generated_token_ids": ANY(torch.Tensor )}, ] , ) _a = generator.model.config.eos_token_id _a = "<pad>" _a = generator( ["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , ) self.assertEqual( __a , [ [ {"generated_token_ids": ANY(torch.Tensor )}, {"generated_token_ids": ANY(torch.Tensor )}, ], [ {"generated_token_ids": ANY(torch.Tensor )}, {"generated_token_ids": ANY(torch.Tensor )}, ], ] , ) @require_tf def UpperCamelCase__ ( self : Dict ): _a = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" ) # do_sample=False necessary for reproducibility _a = generator("Something there" , do_sample=__a ) self.assertEqual(__a , [{"generated_text": ""}] )
63
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='LayoutLMv3ImageProcessor' __a =('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__( self : Tuple , __a : int=None , __a : Union[str, Any]=None , **__a : Optional[Any] ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__( self : Any , __a : List[str] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Dict , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) # first, apply the image processor _a = self.image_processor(images=__a , return_tensors=__a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__a , __a ): _a = [text] # add batch dimension (as the image processor always adds a batch dimension) _a = features["words"] _a = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , ) # add pixel values _a = features.pop("pixel_values" ) if return_overflowing_tokens is True: _a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] ) _a = images return encoded_inputs def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image _a = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__a ) != len(__a ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f' {len(__a )} and {len(__a )}' ) return images_with_overflow def UpperCamelCase__ ( self : int , *__a : str , **__a : Tuple ): return self.tokenizer.batch_decode(*__a , **__a ) def UpperCamelCase__ ( self : str , *__a : List[Any] , **__a : List[str] ): return self.tokenizer.decode(*__a , **__a ) @property def UpperCamelCase__ ( self : Tuple ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def UpperCamelCase__ ( self : int ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def UpperCamelCase__ ( self : List[str] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
63
1
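The overflow handling in the LayoutLMv3 processor above maps every tokenized window back to its source page image; in miniature (sample data invented):

images = ["page0", "page1"]
overflow_to_sample_mapping = [0, 0, 1]  # the first page overflowed into two windows
print([images[i] for i in overflow_to_sample_mapping])  # ['page0', 'page0', 'page1']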
'''simple docstring''' import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model lowerCAmelCase_ : Optional[int] = '0.12' # assumed parallelism: 8 if is_torch_available(): import torch def _lowerCamelCase ( lowercase : Tuple , lowercase : str , lowercase : int=None ) -> str: if rng is None: _a = random.Random() _a = 1 for dim in shape: total_dims *= dim _a = [] for _ in range(lowercase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _a = np.array(lowercase , dtype=jnp.intaa ).reshape(lowercase ) return output def _lowerCamelCase ( lowercase : str , lowercase : Union[str, Any]=None ) -> List[Any]: _a = ids_tensor(lowercase , vocab_size=2 , rng=lowercase ) # make sure that at least one token is attended to for each batch _a = 1 return attn_mask @require_flax class __SCREAMING_SNAKE_CASE : """simple docstring""" __a =None __a =() def UpperCamelCase__ ( self : int ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _a = 2 _a = inputs["input_ids"].shape[-1] // 2 _a = inputs["input_ids"][:max_batch_size, :sequence_length] _a = jnp.ones_like(__a ) _a = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _a = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _a = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def UpperCamelCase__ ( self : Tuple ): _a , _a , _a , _a = self._get_input_ids_and_config() _a = False _a = max_length _a = 0 for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model_class.__name__[4:] # Skip the "Flax" at the beginning _a = getattr(__a , __a ) _a = pt_model_class(__a ).eval() _a = load_flax_weights_in_pytorch_model(__a , flax_model.params ) _a = flax_model.generate(__a ).sequences _a = pt_model.generate(torch.tensor(__a , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def UpperCamelCase__ ( self : Any ): _a , _a , _a , _a = self._get_input_ids_and_config() _a = False _a = max_length for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a ).sequences self.assertEqual(generation_outputs.shape[-1] , __a ) _a = jit(model.generate ) _a = jit_generate(__a ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self : int ): _a , _a , _a , _a = self._get_input_ids_and_config() _a = True _a = max_length for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a ).sequences self.assertEqual(generation_outputs.shape[-1] , __a ) _a = jit(model.generate ) _a = jit_generate(__a ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self : int ): _a , _a , _a , _a = self._get_input_ids_and_config() _a = False _a = 
max_length _a = 2 for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a ).sequences self.assertEqual(generation_outputs.shape[-1] , __a ) _a = jit(model.generate ) _a = jit_generate(__a ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self : Union[str, Any] ): _a , _a , _a , _a = self._get_input_ids_and_config() _a = False _a = max_length _a = 2 _a = 2 for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def UpperCamelCase__ ( self : Optional[int] ): _a , _a , _a , _a = self._get_input_ids_and_config() _a = True _a = max_length _a = 0.8 _a = 10 _a = 0.3 _a = 1 _a = 8 _a = 9 for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a ).sequences self.assertEqual(generation_outputs.shape[-1] , __a ) _a = jit(model.generate ) _a = jit_generate(__a ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self : int ): _a , _a , _a , _a = self._get_input_ids_and_config() _a = max_length _a = 1 _a = 8 _a = 9 for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a ).sequences self.assertEqual(generation_outputs.shape[-1] , __a ) _a = jit(model.generate ) _a = jit_generate(__a ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self : List[Any] ): _a , _a , _a , _a = self._get_input_ids_and_config() _a = max_length _a = 2 _a = 1 _a = 8 _a = 9 for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a ).sequences self.assertEqual(generation_outputs.shape[-1] , __a ) _a = jit(model.generate ) _a = jit_generate(__a ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self : List[str] ): _a , _a , _a , _a = self._get_input_ids_and_config() # pad attention mask on the left _a = attention_mask.at[(0, 0)].set(0 ) _a = False _a = max_length for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a , attention_mask=__a ).sequences self.assertEqual(generation_outputs.shape[-1] , __a ) _a = jit(model.generate ) _a = jit_generate(__a , attention_mask=__a ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self : str ): _a , _a , _a , _a = self._get_input_ids_and_config() # pad attention mask on the left _a = attention_mask.at[(0, 0)].set(0 ) _a = True _a = max_length for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a , attention_mask=__a ).sequences self.assertEqual(generation_outputs.shape[-1] , __a ) _a = jit(model.generate ) _a = jit_generate(__a , attention_mask=__a ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self : Any ): _a , _a , _a , _a = self._get_input_ids_and_config() # pad attention mask on the left _a = attention_mask.at[(0, 0)].set(0 ) _a = 2 _a = max_length for model_class in self.all_generative_model_classes: _a = model_class(__a ) _a = model.generate(__a , attention_mask=__a ).sequences self.assertEqual(generation_outputs.shape[-1] , __a ) _a = jit(model.generate ) _a = 
jit_generate(__a , attention_mask=__a ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Union[str, Any] ): _a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _a = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _a = "Hello world" _a = tokenizer(__a , return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__a , "do_samples" ): model.generate(__a , do_samples=__a ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__a , "foo" ): _a = {"foo": "bar"} model.generate(__a , **__a )
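# The tests above repeatedly compare an eager `model.generate` call with its
# `jax.jit`-compiled counterpart.  A minimal, self-contained sketch of that
# pattern; the toy `greedy_step` below is a stand-in for `generate`, not part
# of the test suite:
import jax
import jax.numpy as jnp


def greedy_step(logits):
    # pick the highest-scoring token id per batch row
    return jnp.argmax(logits, axis=-1)


toy_logits = jnp.array([[0.1, 2.0, 0.3], [1.5, 0.2, 0.9]])
eager_out = greedy_step(toy_logits)
jit_out = jax.jit(greedy_step)(toy_logits)
assert eager_out.tolist() == jit_out.tolist()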
63
'''simple docstring'''
from ....utils import logging

lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)


class __SCREAMING_SNAKE_CASE(lowerCamelCase_):
    """simple docstring"""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # share the wrapped config's attribute dict, then graft the
        # multimodal-specific fields onto it
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
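# Hedged usage sketch for the wrapper above (it assumes the undefined
# `lowerCamelCase_` base resolves to a plain object): the wrapper shares the
# source config's attribute dict and grafts the multimodal fields onto it.
from types import SimpleNamespace

base_config = SimpleNamespace(hidden_size=768, vocab_size=30522)
wrapped = __SCREAMING_SNAKE_CASE(base_config, num_labels=2, modal_hidden_size=2048)
print(wrapped.hidden_size, wrapped.modal_hidden_size, wrapped.num_labels)  # 768 2048 2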
63
1
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase_ : List[str] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __SCREAMING_SNAKE_CASE : """simple docstring""" __a =PegasusConfig __a ={} __a ='gelu' def __init__( self : Optional[Any] , __a : str , __a : List[str]=13 , __a : Union[str, Any]=7 , __a : List[str]=True , __a : Optional[int]=False , __a : Tuple=99 , __a : Dict=32 , __a : str=5 , __a : Any=4 , __a : Optional[int]=37 , __a : Optional[int]=0.1 , __a : Tuple=0.1 , __a : Optional[Any]=20 , __a : List[str]=2 , __a : Optional[Any]=1 , __a : Optional[int]=0 , ): _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = eos_token_id _a = pad_token_id _a = bos_token_id def UpperCamelCase__ ( self : Any ): _a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _a = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _a = np.concatenate([input_ids, eos_tensor] , axis=1 ) _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _a = prepare_pegasus_inputs_dict(__a , __a , __a ) return config, inputs_dict def UpperCamelCase__ ( self : str , __a : Tuple , __a : List[Any] , __a : Optional[int] ): _a = 20 _a = model_class_name(__a ) _a = model.encode(inputs_dict["input_ids"] ) _a , _a = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _a = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) _a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model.decode( decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , 
decoder_position_ids=__a , ) _a = model.decode(__a , __a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : int , __a : Dict ): _a = 20 _a = model_class_name(__a ) _a = model.encode(inputs_dict["input_ids"] ) _a , _a = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _a = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _a = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model.decode( decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , ) _a = model.decode(__a , __a , decoder_attention_mask=__a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def _lowerCamelCase ( lowercase : Tuple , lowercase : Optional[int] , lowercase : int , lowercase : str=None , lowercase : Any=None , ) -> Union[str, Any]: if attention_mask is None: _a = np.not_equal(lowercase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _a = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a =(FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a =True __a =False __a =False __a =False def UpperCamelCase__ ( self : Tuple ): _a = FlaxPegasusModelTester(self ) _a = ConfigTester(self , config_class=__a ) def UpperCamelCase__ ( self : Dict ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self : List[Any] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__a , __a , __a ) def UpperCamelCase__ ( self : str ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a ) def UpperCamelCase__ ( self : Any ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = self._prepare_for_class(__a , __a ) _a = model_class(__a ) @jax.jit def encode_jitted(__a : Dict , __a : Dict=None , **__a : str ): return model.encode(input_ids=__a , attention_mask=__a ) with self.subTest("JIT Enabled" ): _a = encode_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _a = encode_jitted(**__a ).to_tuple() 
self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCamelCase__ ( self : Optional[int] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = model_class(__a ) _a = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) _a = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(__a : Dict , __a : List[str] , __a : str ): return model.decode( decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , ) with self.subTest("JIT Enabled" ): _a = decode_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _a = decode_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCamelCase__ ( self : List[Any] ): for model_class_name in self.all_model_classes: _a = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__a ) _a = np.ones((1, 1) ) _a = model(__a ) self.assertIsNotNone(__a ) @slow def UpperCamelCase__ ( self : str ): _a = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) _a = PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) _a = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. 
We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] _a = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] _a = tokenizer(__a , return_tensors="np" , truncation=__a , max_length=5_12 , padding=__a ) _a = model.generate(**__a , num_beams=2 ).sequences _a = tokenizer.batch_decode(__a , skip_special_tokens=__a ) assert tgt_text == decoded
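# The decoder mask built by `prepare_pegasus_inputs_dict` above can be checked
# in isolation: position 0 is always attended, the rest follow the pad mask.
import numpy as np

pad_token_id = 0
decoder_input_ids = np.array([[2, 5, 7, 0, 0]])
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
        np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
    ],
    axis=-1,
)
print(decoder_attention_mask)  # [[1 1 1 0 0]]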
63
'''simple docstring'''
def _lowerCamelCase(n: int = 100) -> int:
    # difference between the square of the sum and the sum of the squares
    # of the first n natural numbers (Project Euler problem 6)
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{_lowerCamelCase() = }")
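# Worked check for n = 10: the sum 1 + ... + 10 is 55, so the squared sum is
# 3025; the sum of squares is 385; the difference is 3025 - 385 = 2640.
assert _lowerCamelCase(10) == 2640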
63
1
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCAmelCase_ : List[Any] = get_logger(__name__) class __SCREAMING_SNAKE_CASE : """simple docstring""" __a ='dummy_data' __a ='datasets' __a =False def __init__( self : Dict , __a : str , __a : str , __a : Union[Version, str] , __a : Optional[str] = None , __a : bool = False , __a : bool = True , __a : Optional[List[Callable]] = None , ): _a = 0 _a = dataset_name _a = cache_dir _a = use_local_dummy_data _a = config # download_callbacks take a single url as input _a = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _a = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _a = str(__a ) # to be downloaded _a = None _a = None @property def UpperCamelCase__ ( self : Dict ): if self._dummy_file is None: _a = self.download_dummy_data() return self._dummy_file @property def UpperCamelCase__ ( self : Dict ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def UpperCamelCase__ ( self : Optional[int] ): return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def UpperCamelCase__ ( self : int ): _a = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _a = cached_path( __a , cache_dir=self.cache_dir , extract_compressed_file=__a , force_extract=__a ) return os.path.join(__a , self.dummy_file_name ) @property def UpperCamelCase__ ( self : Tuple ): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def UpperCamelCase__ ( self : Union[str, Any] ): if self._bucket_url is None: _a = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def UpperCamelCase__ ( self : Optional[int] ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def UpperCamelCase__ ( self : Dict , __a : Tuple , *__a : Tuple ): if self.load_existing_dummy_data: # dummy data is downloaded and tested _a = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _a = self.dummy_file_name # special case when data_url is a dict if isinstance(__a , __a ): return self.create_dummy_data_dict(__a , __a ) elif isinstance(__a , (list, tuple) ): return self.create_dummy_data_list(__a , __a ) else: return self.create_dummy_data_single(__a , __a ) def UpperCamelCase__ ( self : Any , __a : int , *__a : int ): return self.download_and_extract(__a ) def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : int ): return self.download_and_extract(__a ) def UpperCamelCase__ ( self : Tuple , __a : List[Any] , *__a : Tuple , **__a : int ): return path def UpperCamelCase__ ( self : List[Any] ): return {} def UpperCamelCase__ ( self : str , __a : List[str] , __a : Union[str, Any] ): _a = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__a , __a ): for single_url in single_urls: download_callback(__a ) else: _a = single_urls download_callback(__a ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__a , __a ): _a = [os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) ) for x in single_urls] else: _a = single_urls _a = os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) ) _a = value # make sure that values are unique if all(isinstance(__a , __a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _a = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCamelCase__ ( self : Optional[Any] , __a : List[str] , __a : str ): _a = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _a = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __a ) ) for url in data_url ) _a = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _a = [data_url[0]] * len(__a ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__a ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _a = os.path.join(__a , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__a ) return dummy_data_list def UpperCamelCase__ ( self : Union[str, Any] , __a : Optional[int] , __a : List[Any] ): for download_callback in self.download_callbacks: download_callback(__a ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _a = os.path.join(__a , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__a ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. 
# For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def UpperCamelCase__ ( self : Dict ): pass def UpperCamelCase__ ( self : Dict ): pass def UpperCamelCase__ ( self : Optional[int] , __a : Any ): def _iter_archive_members(__a : Tuple ): # this preserves the order of the members inside the ZIP archive _a = Path(self.dummy_file ).parent _a = path.relative_to(__a ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _a = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__a ) _a = Path(__a ) _a = _iter_archive_members(__a ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__a ).as_posix(), file_path.open("rb" ) def UpperCamelCase__ ( self : Tuple , __a : Optional[int] ): if not isinstance(__a , __a ): _a = [paths] for path in paths: if os.path.isfile(__a ): if os.path.basename(__a ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__a ): if os.path.basename(__a ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__a ): if filename.startswith((".", "__") ): continue yield os.path.join(__a , __a )
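# Sketch of the name-collision trick used by `create_dummy_data_dict` above:
# when two URLs share a file name, the split key is appended to keep the
# dummy paths unique.
dummy = {"train": "dummy_data/data.txt", "test": "dummy_data/data.txt"}
if len(set(dummy.values())) < len(dummy):
    dummy = {key: value + key for key, value in dummy.items()}
print(dummy)  # {'train': 'dummy_data/data.txttrain', 'test': 'dummy_data/data.txttest'}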
63
'''simple docstring'''
def _lowerCamelCase(num: int) -> bool:
    # reverse the digits arithmetically and compare with the original
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
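# Quick checks of the digit-reversal loop above.
assert _lowerCamelCase(121)
assert not _lowerCamelCase(123)
assert not _lowerCamelCase(-121)  # negatives are rejected up front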
63
1
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 lowerCAmelCase_ : Any = { # 1536-bit 5: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AACAA68FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199' + 'FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( 
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08' + '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B' + '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9' + 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6' + '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8' + 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C' + '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718' + '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D' + '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D' + 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226' + '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC' + 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26' + '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB' + '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2' + '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127' + 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406' + 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918' + 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151' + '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03' + 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F' + 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B' + 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632' + '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E' + '6DCC4024FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD' + 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831' + '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B' + 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF' + '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6' + 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3' + '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328' + '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C' + 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE' + '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4' + '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300' + '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568' + 
'3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9' + '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B' + '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A' + '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36' + '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1' + 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92' + '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47' + '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71' + '60C980DD98EDD3DFFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, } class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Any , __a : int = 14 ): if group not in primes: raise ValueError("Unsupported Group" ) _a = primes[group]["prime"] _a = primes[group]["generator"] _a = int(hexlify(urandom(32 ) ) , base=16 ) def UpperCamelCase__ ( self : Dict ): return hex(self.__private_key )[2:] def UpperCamelCase__ ( self : Any ): _a = pow(self.generator , self.__private_key , self.prime ) return hex(__a )[2:] def UpperCamelCase__ ( self : Optional[Any] , __a : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(__a , (self.prime - 1) // 2 , self.prime ) == 1 ) def UpperCamelCase__ ( self : Optional[int] , __a : str ): _a = int(__a , base=16 ) if not self.is_valid_public_key(__a ): raise ValueError("Invalid public key" ) _a = pow(__a , self.__private_key , self.prime ) return shaaaa(str(__a ).encode() ).hexdigest() @staticmethod def UpperCamelCase__ ( __a : int , __a : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(__a , (prime - 1) // 2 , __a ) == 1 ) @staticmethod def UpperCamelCase__ ( __a : str , __a : str , __a : int = 14 ): _a = int(__a , base=16 ) _a = int(__a , base=16 ) _a = primes[group]["prime"] if not DiffieHellman.is_valid_public_key_static(__a , __a ): raise ValueError("Invalid public key" ) _a = pow(__a , __a , __a ) return shaaaa(str(__a ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
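# A minimal end-to-end sketch of the exchange the class above encodes, written
# with `pow` directly so it does not depend on the obfuscated method names.
# `lowerCAmelCase_` is the MODP-group table defined at the top of this file
# (the class body refers to it as `primes`); group 14 is the 2048-bit group.
import secrets

modp_prime = lowerCAmelCase_[14]["prime"]
modp_generator = lowerCAmelCase_[14]["generator"]

alice_private = secrets.randbits(256)
bob_private = secrets.randbits(256)
alice_public = pow(modp_generator, alice_private, modp_prime)  # g^a mod p
bob_public = pow(modp_generator, bob_private, modp_prime)      # g^b mod p

# both sides derive the same secret: (g^b)^a == (g^a)^b (mod p)
assert pow(bob_public, alice_private, modp_prime) == pow(alice_public, bob_private, modp_prime)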
63
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase_ : int = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Optional[int] = ['GPTNeoXTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : List[str] = [ 'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoXForCausalLM', 'GPTNeoXForQuestionAnswering', 'GPTNeoXForSequenceClassification', 'GPTNeoXForTokenClassification', 'GPTNeoXLayer', 'GPTNeoXModel', 'GPTNeoXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
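# The `_LazyModule` indirection above defers heavy imports until an attribute
# is first touched.  A generic sketch of the same idea, independent of the
# transformers implementation:
import importlib


class LazyAttr:
    def __init__(self, module_name, attr):
        self._module_name, self._attr, self._obj = module_name, attr, None

    def resolve(self):
        if self._obj is None:
            self._obj = getattr(importlib.import_module(self._module_name), self._attr)
        return self._obj


lazy_sqrt = LazyAttr("math", "sqrt")
print(lazy_sqrt.resolve()(9.0))  # 3.0, and `math` is imported only here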
63
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =ShapEPipeline __a =['prompt'] __a =['prompt'] __a =[ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] __a =False @property def UpperCamelCase__ ( self : Optional[Any] ): return 32 @property def UpperCamelCase__ ( self : Union[str, Any] ): return 32 @property def UpperCamelCase__ ( self : List[Any] ): return self.time_input_dim * 4 @property def UpperCamelCase__ ( self : Optional[Any] ): return 8 @property def UpperCamelCase__ ( self : Any ): _a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def UpperCamelCase__ ( self : Union[str, Any] ): torch.manual_seed(0 ) _a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(__a ) @property def UpperCamelCase__ ( self : int ): torch.manual_seed(0 ) _a = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } _a = PriorTransformer(**__a ) return model @property def UpperCamelCase__ ( self : Any ): torch.manual_seed(0 ) _a = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } _a = ShapERenderer(**__a ) return model def UpperCamelCase__ ( self : Tuple ): _a = self.dummy_prior _a = self.dummy_text_encoder _a = self.dummy_tokenizer _a = self.dummy_renderer _a = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , ) _a = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def UpperCamelCase__ ( self : int , __a : List[str] , __a : str=0 ): if str(__a ).startswith("mps" ): _a = torch.manual_seed(__a ) else: _a = torch.Generator(device=__a ).manual_seed(__a ) _a = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def UpperCamelCase__ ( self : Dict ): _a = "cpu" _a = self.get_dummy_components() _a = self.pipeline_class(**__a ) _a = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) _a = pipe(**self.get_dummy_inputs(__a ) ) _a = output.images[0] _a = image[0, -3:, -3:, -1] assert 
image.shape == (20, 32, 32, 3) _a = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase__ ( self : List[str] ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ ( self : str ): _a = torch_device == "cpu" _a = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__a , relax_max_difference=__a , ) def UpperCamelCase__ ( self : Tuple ): _a = self.get_dummy_components() _a = self.pipeline_class(**__a ) _a = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) _a = 1 _a = 2 _a = self.get_dummy_inputs(__a ) for key in inputs.keys(): if key in self.batch_params: _a = batch_size * [inputs[key]] _a = pipe(**__a , num_images_per_prompt=__a )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self : int ): _a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy" ) _a = ShapEPipeline.from_pretrained("openai/shap-e" ) _a = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) _a = torch.Generator(device=__a ).manual_seed(0 ) _a = pipe( "a shark" , generator=__a , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__a , __a )
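# `get_dummy_inputs` above branches on the device when seeding, since MPS has
# historically not supported a device-local torch.Generator.  The pattern in
# isolation:
import torch

device, seed = "cpu", 0
if str(device).startswith("mps"):
    generator = torch.manual_seed(seed)
else:
    generator = torch.Generator(device=device).manual_seed(seed)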
63
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase_ : Any = get_tests_dir('fixtures') lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json') class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] ): _a = 0 def UpperCamelCase__ ( self : str ): _a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: _a = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _a = AutoFeatureExtractor.from_pretrained(__a ).to_dict() config_dict.pop("feature_extractor_type" ) _a = WavaVecaFeatureExtractor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) # make sure private variable is not incorrectly saved _a = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Union[str, Any] ): with self.assertRaisesRegex( __a , "bert-base is not a local folder and is not a valid model identifier" ): _a = AutoFeatureExtractor.from_pretrained("bert-base" ) def UpperCamelCase__ ( self : Optional[Any] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" ) def UpperCamelCase__ ( self : List[Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): _a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def UpperCamelCase__ ( self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def UpperCamelCase__ ( self : Any ): try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoFeatureExtractor.register(__a , __a ) # Now that the config is registered, it can be used as any other config with the auto-API _a = CustomFeatureExtractor.from_pretrained(__a ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__ ( self : Tuple ): class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =True try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # If remote code is not set, the default is to use local _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
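# The registration round-trip exercised above, reduced to its core; it assumes
# the CustomConfig / CustomFeatureExtractor fixtures imported at the top of
# the file accept a default constructor.
AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
with tempfile.TemporaryDirectory() as tmp_dir:
    CustomFeatureExtractor().save_pretrained(tmp_dir)
    reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)
assert isinstance(reloaded, CustomFeatureExtractor)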
63
1
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def _lowerCamelCase ( lowercase : List[str] , lowercase : Optional[Any] , lowercase : Dict ) -> Optional[Any]: _a = AutoConfig.from_pretrained(lowercase ) _a = FlaxAutoModelForSeqaSeqLM.from_config(config=lowercase ) _a = checkpoints.load_tax_checkpoint(lowercase ) _a = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"] if config.model_type == "t5": _a = "SelfAttention" if config.model_type == "longt5" and config.encoder_attention_type == "local": _a = "LocalSelfAttention" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a = "TransientGlobalSelfAttention" else: raise ValueError( "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`" " attribute with a value from ['local', 'transient-global]." ) # Encoder for layer_index in range(config.num_layers ): _a = F'layers_{str(lowercase )}' # Self-Attention _a = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"] _a = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"] _a = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"] _a = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"] # Layer Normalization _a = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"] if split_mlp_wi: _a = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"] _a = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: _a = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"] _a = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization _a = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning _a = flax_model.params["encoder"]["block"][str(lowercase )]["layer"] _a = tax_attention_key _a = tax_attention_out _a = tax_attention_query _a = tax_attention_value _a = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a = tax_global_layer_norm if split_mlp_wi: _a = tax_mlp_wi_a _a = tax_mlp_wi_a else: _a = tax_mlp_wi _a = tax_mlp_wo _a = tax_mlp_layer_norm _a = flax_model_encoder_layer_block # Only for layer 0: _a = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T _a = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T _a = tax_encoder_global_rel_embedding # Assigning _a = tax_model["target"]["encoder"]["encoder_norm"]["scale"] _a = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): _a = F'layers_{str(lowercase )}' # Self-Attention _a = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"] _a = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"] _a = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"] _a = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"] # Layer Normalization _a = 
tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][ "scale" ] # Encoder-Decoder-Attention _a = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"] _a = tax_enc_dec_attention_module["key"]["kernel"] _a = tax_enc_dec_attention_module["out"]["kernel"] _a = tax_enc_dec_attention_module["query"]["kernel"] _a = tax_enc_dec_attention_module["value"]["kernel"] # Layer Normalization _a = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"] # MLP if split_mlp_wi: _a = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"] _a = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: _a = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"] _a = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization _a = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning _a = flax_model.params["decoder"]["block"][str(lowercase )]["layer"] _a = tax_attention_key _a = tax_attention_out _a = tax_attention_query _a = tax_attention_value _a = tax_pre_attention_layer_norm _a = tax_enc_dec_attention_key _a = tax_enc_dec_attention_out _a = tax_enc_dec_attention_query _a = tax_enc_dec_attention_value _a = tax_cross_layer_norm if split_mlp_wi: _a = tax_mlp_wi_a _a = tax_mlp_wi_a else: _a = tax_mlp_wi _a = tax_mlp_wo _a = txa_mlp_layer_norm _a = flax_model_decoder_layer_block # Decoder Normalization _a = tax_model["target"]["decoder"]["decoder_norm"]["scale"] _a = txa_decoder_norm # Only for layer 0: _a = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T _a = tax_decoder_rel_embedding # Token Embeddings _a = tax_model["target"]["token_embedder"]["embedding"] _a = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _a = tax_model["target"]["decoder"]["logits_dense"]["kernel"] flax_model.save_pretrained(lowercase ) print("T5X Model was sucessfully converted!" ) if __name__ == "__main__": lowerCAmelCase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) lowerCAmelCase_ : Optional[Any] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
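# Hypothetical invocation (all paths below are placeholders; the flag names
# match the argparse definitions above):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x/checkpoint_1000000 \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /tmp/long-t5-flax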
63
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : int = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='gpt_bigcode' __a =['past_key_values'] __a ={ 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Optional[Any] , __a : Tuple=5_02_57 , __a : str=10_24 , __a : Dict=7_68 , __a : Tuple=12 , __a : str=12 , __a : Optional[int]=None , __a : Dict="gelu_pytorch_tanh" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=0.1 , __a : Tuple=1e-5 , __a : str=0.02 , __a : Dict=True , __a : Union[str, Any]=True , __a : Optional[int]=5_02_56 , __a : Optional[int]=5_02_56 , __a : Union[str, Any]=True , __a : Dict=True , __a : Union[str, Any]=True , **__a : List[Any] , ): _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = scale_attn_weights _a = use_cache _a = attention_softmax_in_fpaa _a = scale_attention_softmax_in_fpaa _a = multi_query _a = bos_token_id _a = eos_token_id super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
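# The `attribute_map` above aliases generic names onto GPT-2-style ones, so
# `hidden_size` transparently reads `n_embd`.  Sketch, assuming the undefined
# `lowerCamelCase_` base is `PretrainedConfig`:
cfg = __SCREAMING_SNAKE_CASE()
assert cfg.hidden_size == cfg.n_embd == 768
assert cfg.num_hidden_layers == cfg.n_layer == 12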
63
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ : Any = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Any = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Tuple = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Dict = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
63
'''simple docstring'''
def _lowerCamelCase(n: int) -> bool:
    # round the floating-point cube root before checking: n ** (1 / 3) is
    # not guaranteed to be exact even when n is a perfect cube
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(_lowerCamelCase(27))  # True
    print(_lowerCamelCase(4))  # False
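# Spot checks, including a large cube where raw float error would bite:
for n in (1, 8, 27, 12_345 ** 3):
    assert _lowerCamelCase(n)
assert not _lowerCamelCase(4)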
63
1
'''simple docstring'''
def _lowerCamelCase(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    # compare both ends of the window, then recurse one step inwards;
    # returns the index of `key` or -1 when it is absent
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return _lowerCamelCase(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
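# The ends-inward scan works on any list, sorted or not:
assert _lowerCamelCase([4, 1, 3, 2], 3) == 2
assert _lowerCamelCase([4, 1, 3, 2], 9) == -1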
63
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : Optional[int] = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='deta' __a ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) _a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(__a , __a ): _a = backbone_config.pop("model_type" ) _a = CONFIG_MAPPING[backbone_model_type] _a = config_class.from_dict(__a ) _a = backbone_config _a = num_queries _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = init_xavier_std _a = encoder_layerdrop _a = auxiliary_loss _a = position_embedding_type # deformable attributes _a = num_feature_levels _a = encoder_n_points _a = decoder_n_points _a = two_stage _a = two_stage_num_proposals _a = with_box_refine _a = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher _a = class_cost _a = bbox_cost _a = giou_cost # Loss coefficients _a = mask_loss_coefficient _a = dice_loss_coefficient _a = bbox_loss_coefficient _a = giou_loss_coefficient _a = eos_coefficient _a = focal_alpha super().__init__(is_encoder_decoder=__a , **__a ) @property def UpperCamelCase__ ( self : Optional[Any] ): return self.encoder_attention_heads @property def UpperCamelCase__ ( self : Dict ): return self.d_model def UpperCamelCase__ ( self : List[str] ): _a = copy.deepcopy(self.__dict__ ) _a = self.backbone_config.to_dict() _a = self.__class__.model_type return output
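# The constructor above enforces one invariant: two-stage DETA requires box
# refinement.  Sketch, assuming the real keyword names `two_stage` and
# `with_box_refine` survive the obfuscated signature:
try:
    __SCREAMING_SNAKE_CASE(two_stage=True, with_box_refine=False)
except ValueError as err:
    print(err)  # If two_stage is True, with_box_refine must be True.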
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Tuple , __a : Union[str, Any] , ): _a = parent _a = 13 _a = 7 _a = 30 _a = self.seq_length + self.mem_len _a = 15 _a = True _a = True _a = 99 _a = [10, 50, 80] _a = 32 _a = 32 _a = 4 _a = 8 _a = 1_28 _a = 2 _a = 2 _a = None _a = 1 _a = 0 _a = 3 _a = self.vocab_size - 1 _a = 0.01 def UpperCamelCase__ ( self : int ): _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def UpperCamelCase__ ( self : Optional[Any] ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def UpperCamelCase__ ( self : Tuple , __a : List[Any] , __a : Tuple , __a : str , __a : int ): _a = TFTransfoXLModel(__a ) _a , _a = model(__a ).to_tuple() _a = {"input_ids": input_ids_a, "mems": mems_a} _a , _a = model(__a ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : Union[str, Any] ): _a = TFTransfoXLLMHeadModel(__a ) _a , _a = model(__a ).to_tuple() _a = {"input_ids": input_ids_a, "labels": lm_labels} _a , _a = model(__a ).to_tuple() _a , _a = model([input_ids_a, mems_a] ).to_tuple() _a = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels} _a , _a = model(__a ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCamelCase__ ( self : Dict , __a : List[Any] , __a : List[str] , __a : Any , __a : str ): _a = 
TFTransfoXLForSequenceClassification(__a ) _a = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self : str ): _a = self.prepare_config_and_inputs() ((_a) , (_a) , (_a) , (_a)) = config_and_inputs _a = {"input_ids": input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) __a =() if is_tf_available() else () __a =( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented __a =False __a =False __a =False __a =False def UpperCamelCase__ ( self : Union[str, Any] , __a : Union[str, Any] , __a : List[str] , __a : Tuple , __a : Optional[Any] , __a : Tuple ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def UpperCamelCase__ ( self : str ): _a = TFTransfoXLModelTester(self ) _a = ConfigTester(self , config_class=__a , d_embed=37 ) def UpperCamelCase__ ( self : Union[str, Any] ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self : Any ): self.model_tester.set_seed() _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*__a ) def UpperCamelCase__ ( self : str ): self.model_tester.set_seed() _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*__a ) def UpperCamelCase__ ( self : Optional[Any] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__a ) def UpperCamelCase__ ( self : Optional[Any] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: _a = model_class(__a ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: _a = model.get_output_embeddings() assert isinstance(__a , tf.keras.layers.Layer ) _a = model.get_bias() assert name is None else: _a = model.get_output_embeddings() assert x is None _a = model.get_bias() assert name is None def UpperCamelCase__ ( self : Optional[int] ): # TODO JP: Make TransfoXL XLA compliant pass @slow def UpperCamelCase__ ( self : Tuple ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a = TFTransfoXLModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." ) def UpperCamelCase__ ( self : Tuple ): pass @require_tf class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" @unittest.skip("Skip test until #12651 is resolved." 
) @slow def UpperCamelCase__ ( self : Optional[int] ): _a = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" ) # fmt: off _a = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off _a = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> _a = model.generate(__a , max_length=2_00 , do_sample=__a ) self.assertListEqual(output_ids[0].numpy().tolist() , __a )
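# Hedged end-to-end sketch mirroring the slow integration test above (assumes the
# "transfo-xl-wt103" checkpoint can be downloaded; the decoded text is illustrative).
from transformers import TransfoXLTokenizer, TFTransfoXLLMHeadModel

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
inputs = tokenizer("In 1991 , the remains of Russian Tsar", return_tensors="tf")
output_ids = model.generate(inputs["input_ids"], max_length=40, do_sample=False)
print(tokenizer.decode(output_ids[0]))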
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the token length of each example so the dataset can do dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
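# Example invocation through fire (model name and paths are placeholders):
#   python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./wmt_en_ro --consider_target True
# Afterwards each split directory holds a "<split>.len" pickle. Assuming the same local
# utils module also ships a pickle_load counterpart to pickle_save, the lengths can be
# inspected with:
from utils import pickle_load

lens = pickle_load("./wmt_en_ro/train.len")
print(len(lens), max(lens))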
'''simple docstring''' import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def _lowerCamelCase ( lowercase : str , lowercase : Dict , lowercase : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : Any=None , lowercase : str=None , lowercase : Optional[int]=None , ) -> str: if attention_mask is None: _a = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _a = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _a = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowercase ) if decoder_head_mask is None: _a = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase ) if cross_attn_head_mask is None: _a = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : List[Any] , __a : List[str] , __a : Tuple=13 , __a : str=7 , __a : Any=True , __a : Union[str, Any]=False , __a : int=99 , __a : Tuple=16 , __a : Any=2 , __a : Tuple=4 , __a : int=4 , __a : Tuple="relu" , __a : List[Any]=0.1 , __a : Union[str, Any]=0.1 , __a : Any=0.0 , __a : Union[str, Any]=0.0 , __a : List[Any]=20 , __a : List[Any]=2 , __a : Optional[Any]=1 , __a : int=0 , ): _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = encoder_layerdrop _a = decoder_layerdrop _a = max_position_embeddings _a = eos_token_id _a = pad_token_id _a = bos_token_id def UpperCamelCase__ ( self : str ): _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = self.eos_token_id # Eos Token _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _a = input_ids.clamp(self.pad_token_id + 1 ) _a = decoder_input_ids.clamp(self.pad_token_id + 1 ) _a = self.get_config() _a = prepare_mam_aaa_inputs_dict(__a , __a , __a ) return config, inputs_dict def UpperCamelCase__ ( self : List[str] ): return MaMaaaConfig( 
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def UpperCamelCase__ ( self : List[Any] ): _a , _a = self.prepare_config_and_inputs() return config, inputs_dict def UpperCamelCase__ ( self : int , __a : Dict , __a : Dict ): _a = MaMaaaModel(config=__a ).get_decoder().to(__a ).eval() _a = inputs_dict["input_ids"] _a = inputs_dict["attention_mask"] _a = inputs_dict["head_mask"] # first forward pass _a = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) _a , _a = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids _a = ids_tensor((self.batch_size, 3) , config.vocab_size ) _a = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and _a = torch.cat([input_ids, next_tokens] , dim=-1 ) _a = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) _a = model(__a , attention_mask=__a )["last_hidden_state"] _a = model(__a , attention_mask=__a , past_key_values=__a )[ "last_hidden_state" ] # select random slice _a = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a = output_from_no_past[:, -3:, random_slice_idx].detach() _a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-2 ) ) def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[int] ): _a = MaMaaaModel(config=__a ).to(__a ).eval() _a = model(**__a ) _a = outputs.encoder_last_hidden_state _a = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _a = model.get_encoder() encoder.save_pretrained(__a ) _a = MaMaaaEncoder.from_pretrained(__a ).to(__a ) _a = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: _a = model.get_decoder() decoder.save_pretrained(__a ) _a = MaMaaaDecoder.from_pretrained(__a ).to(__a ) _a = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__a , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) __a =(MaMaaaForConditionalGeneration,) if is_torch_available() else () __a =( { 'conversational': MaMaaaForConditionalGeneration, 'feature-extraction': MaMaaaModel, 'summarization': MaMaaaForConditionalGeneration, 'text2text-generation': MaMaaaForConditionalGeneration, 'translation': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) __a =True 
__a =True __a =False __a =False def UpperCamelCase__ ( self : int , __a : str , __a : str , __a : List[str] , __a : str , __a : List[str] ): if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def UpperCamelCase__ ( self : Dict ): _a = MaMaaaModelTester(self ) _a = ConfigTester(self , config_class=__a ) def UpperCamelCase__ ( self : str ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self : Dict ): _a , _a = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _a = model_class(__a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__a ) _a , _a = model_class.from_pretrained(__a , output_loading_info=__a ) self.assertEqual(info["missing_keys"] , [] ) def UpperCamelCase__ ( self : List[str] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a ) def UpperCamelCase__ ( self : List[str] ): _a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__a ) def UpperCamelCase__ ( self : Optional[int] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): _a = model_class(__a ) model.to(__a ) model.eval() _a = copy.deepcopy(self._prepare_for_class(__a , __a ) ) if not self.is_encoder_decoder: _a = inputs["input_ids"] del inputs["input_ids"] else: _a = inputs["input_ids"] _a = inputs.get("decoder_input_ids" , __a ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , __a ) _a = model.get_input_embeddings() if not self.is_encoder_decoder: _a = wte(__a ) else: _a = wte(__a ) _a = wte(__a ) with torch.no_grad(): model(**__a )[0] def UpperCamelCase__ ( self : List[str] ): _a , _a = self.model_tester.prepare_config_and_inputs() _a = input_dict["input_ids"] _a = input_ids.ne(1 ).to(__a ) _a = MaMaaaForConditionalGeneration(__a ).eval().to(__a ) if torch_device == "cuda": model.half() model.generate(__a , attention_mask=__a ) model.generate(num_beams=4 , do_sample=__a , early_stopping=__a , num_return_sequences=3 ) def _lowerCamelCase ( lowercase : Any ) -> List[Any]: return torch.tensor(lowercase , dtype=torch.long , device=lowercase ) lowerCAmelCase_ : List[str] = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase__ ( self : Union[str, Any] ): return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def UpperCamelCase__ ( self : Optional[Any] ): _a = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__a ) _a = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) _a = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) _a = prepare_mam_aaa_inputs_dict(model.config , __a , __a ) with torch.no_grad(): _a = model(**__a )[0] _a = torch.Size((1, 11, 10_24) ) self.assertEqual(output.shape , __a ) # change to expected output here _a = torch.tensor( [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__a ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = 
MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__a ) # change to intended input _a = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) _a = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) _a = prepare_mam_aaa_inputs_dict(model.config , __a , __a ) with torch.no_grad(): _a = model(**__a )[0] _a = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , __a ) # change to expected output here _a = torch.tensor( [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__a ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__a ) _a = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) _a = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams _a = tokenizer(__a , padding=__a , return_tensors="pt" ) _a = model.generate( input_ids=dct["input_ids"].to(__a ) , attention_mask=dct["attention_mask"].to(__a ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) _a = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] _a = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=__a , skip_special_tokens=__a ) assert generated == expected_en
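# Hedged translation sketch using the public M2M100 class names (the test above uses
# aliased names for the same classes); assumes the facebook/m2m100_418M checkpoint is
# reachable.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
batch = tokenizer(["La vie est belle."], return_tensors="pt")
generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("en"), num_beams=5)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))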
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): _a = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): # pass variant but use the non-variant filenames _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Dict ): _a = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : List[str] ): # pass variant but use the non-variant filenames _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] _a = "fp16" 
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[int] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
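# A minimal sketch (not the library's actual implementation) of the rule the tests
# above encode: every torch ".bin" weight needs a ".safetensors" sibling, where a
# basename starting with "pytorch_model" maps to "model" and any ".fp16"-style
# variant infix is preserved.
def sketch_is_safetensors_compatible(filenames: list) -> bool:
    names = set(filenames)
    for name in filenames:
        if not name.endswith(".bin"):
            continue
        folder, _, base = name.rpartition("/")
        stem = base[: -len(".bin")]
        if stem.split(".")[0] == "pytorch_model":
            stem = stem.replace("pytorch_model", "model", 1)
        sibling = f"{folder}/{stem}.safetensors" if folder else f"{stem}.safetensors"
        if sibling not in names:
            return False
    return True


assert sketch_is_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)
assert not sketch_is_safetensors_compatible(["text_encoder/pytorch_model.fp16.bin"])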
'''simple docstring''' import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase_ : Dict = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCAmelCase_ : Any = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCAmelCase_ : Optional[Any] = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCAmelCase_ : Tuple = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE (datasets.Metric ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[ "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score", "https://en.wikipedia.org/wiki/METEOR", ] , ) def UpperCamelCase__ ( self : Union[str, Any] , __a : int ): import nltk nltk.download("wordnet" ) if NLTK_VERSION >= version.Version("3.6.5" ): nltk.download("punkt" ) if NLTK_VERSION >= version.Version("3.6.6" ): nltk.download("omw-1.4" ) def UpperCamelCase__ ( self : List[Any] , __a : Optional[Any] , __a : int , __a : Any=0.9 , __a : int=3 , __a : Union[str, Any]=0.5 ): if NLTK_VERSION >= version.Version("3.6.5" ): _a = [ meteor_score.single_meteor_score( word_tokenize(__a ) , word_tokenize(__a ) , alpha=__a , beta=__a , gamma=__a ) for ref, pred in zip(__a , __a ) ] else: _a = [ meteor_score.single_meteor_score(__a , __a , alpha=__a , beta=__a , gamma=__a ) for ref, pred in zip(__a , __a ) ] return {"meteor": np.mean(__a )}
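# Usage sketch taken from the metric's own doctest above; the nltk resources are
# downloaded on first use by _download_and_prepare.
import datasets

meteor = datasets.load_metric("meteor")
predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
print(round(meteor.compute(predictions=predictions, references=references)["meteor"], 4))  # 0.6944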
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    # Turn each byte into two uppercase hexadecimal digits.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
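# Round-trip demo for the codec above.
encoded = base16_encode(b"Hello World!")
assert encoded == "48656C6C6F20576F726C6421"
assert base16_decode(encoded) == b"Hello World!"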
'''simple docstring''' import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig lowerCAmelCase_ : Dict = logging.get_logger(__name__) # General docstring lowerCAmelCase_ : Any = 'PoolFormerConfig' # Base docstring lowerCAmelCase_ : Union[str, Any] = 'sail/poolformer_s12' lowerCAmelCase_ : str = [1, 5_12, 7, 7] # Image classification docstring lowerCAmelCase_ : Dict = 'sail/poolformer_s12' lowerCAmelCase_ : List[str] = 'tabby, tabby cat' lowerCAmelCase_ : str = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def _lowerCamelCase ( lowercase : List[Any] , lowercase : float = 0.0 , lowercase : bool = False ) -> str: if drop_prob == 0.0 or not training: return input _a = 1 - drop_prob _a = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets _a = keep_prob + torch.rand(lowercase , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize _a = input.div(lowercase ) * random_tensor return output class __SCREAMING_SNAKE_CASE (nn.Module ): """simple docstring""" def __init__( self : int , __a : Optional[float] = None ): super().__init__() _a = drop_prob def UpperCamelCase__ ( self : List[str] , __a : torch.Tensor ): return drop_path(__a , self.drop_prob , self.training ) def UpperCamelCase__ ( self : Union[str, Any] ): return "p={}".format(self.drop_prob ) class __SCREAMING_SNAKE_CASE (nn.Module ): """simple docstring""" def __init__( self : Any , __a : Optional[Any] , __a : List[str] , __a : Tuple , __a : str , __a : Optional[int] , __a : List[Any]=None ): super().__init__() _a = patch_size if isinstance(__a , collections.abc.Iterable ) else (patch_size, patch_size) _a = stride if isinstance(__a , collections.abc.Iterable ) else (stride, stride) _a = padding if isinstance(__a , collections.abc.Iterable ) else (padding, padding) _a = nn.Convad(__a , __a , kernel_size=__a , stride=__a , padding=__a ) _a = norm_layer(__a ) if norm_layer else nn.Identity() def UpperCamelCase__ ( self : Union[str, Any] , __a : Optional[Any] ): _a = self.projection(__a ) _a = self.norm(__a ) return embeddings class __SCREAMING_SNAKE_CASE (nn.GroupNorm ): """simple docstring""" def __init__( self : Tuple , __a : Dict , **__a : Union[str, Any] ): super().__init__(1 , __a , **__a ) class __SCREAMING_SNAKE_CASE (nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , __a : int ): super().__init__() _a = nn.AvgPoolad(__a , stride=1 , padding=pool_size // 2 , count_include_pad=__a ) def UpperCamelCase__ ( self : Dict , __a : List[Any] ): return self.pool(__a ) - hidden_states class __SCREAMING_SNAKE_CASE (nn.Module ): """simple docstring""" def __init__( self : int , __a : Union[str, Any] , __a : List[Any] , __a : List[str] , __a : str ): super().__init__() _a = nn.Convad(__a , __a , 1 ) _a = nn.Convad(__a , __a , 1 ) _a = PoolFormerDropPath(__a ) if isinstance(config.hidden_act , __a ): _a = ACTaFN[config.hidden_act] else: _a = config.hidden_act def UpperCamelCase__ ( self : Union[str, Any] , __a : str ): _a 
= self.conva(__a ) _a = self.act_fn(__a ) _a = self.drop(__a ) _a = self.conva(__a ) _a = self.drop(__a ) return hidden_states class __SCREAMING_SNAKE_CASE (nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , __a : Optional[Any] , __a : Any , __a : List[Any] , __a : Dict , __a : Dict , __a : int ): super().__init__() _a = PoolFormerPooling(__a ) _a = PoolFormerOutput(__a , __a , __a , __a ) _a = PoolFormerGroupNorm(__a ) _a = PoolFormerGroupNorm(__a ) # Useful for training neural nets _a = PoolFormerDropPath(__a ) if drop_path > 0.0 else nn.Identity() _a = config.use_layer_scale if config.use_layer_scale: _a = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) _a = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) def UpperCamelCase__ ( self : int , __a : Union[str, Any] ): if self.use_layer_scale: _a = self.pooling(self.before_norm(__a ) ) _a = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection _a = hidden_states + self.drop_path(__a ) _a = () _a = self.output(self.after_norm(__a ) ) _a = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection _a = hidden_states + self.drop_path(__a ) _a = (output,) + outputs return outputs else: _a = self.drop_path(self.pooling(self.before_norm(__a ) ) ) # First residual connection _a = pooling_output + hidden_states _a = () # Second residual connection inside the PoolFormerOutput block _a = self.drop_path(self.output(self.after_norm(__a ) ) ) _a = hidden_states + layer_output _a = (output,) + outputs return outputs class __SCREAMING_SNAKE_CASE (nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , __a : Optional[int] ): super().__init__() _a = config # stochastic depth decay rule _a = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings _a = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) _a = nn.ModuleList(__a ) # Transformer blocks _a = [] _a = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers _a = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__a ) ) _a = nn.ModuleList(__a ) def UpperCamelCase__ ( self : Union[str, Any] , __a : Any , __a : List[Any]=False , __a : Union[str, Any]=True ): _a = () if output_hidden_states else None _a = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): _a , _a = layers # Get patch embeddings from hidden_states _a = embedding_layer(__a ) # Send the embeddings through the blocks for _, blk in enumerate(__a ): _a = blk(__a ) _a = layer_outputs[0] if output_hidden_states: _a = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__a , hidden_states=__a ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =PoolFormerConfig __a 
='poolformer' __a ='pixel_values' __a =True def UpperCamelCase__ ( self : List[Any] , __a : Union[str, Any] ): if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : Optional[int]=False ): if isinstance(__a , __a ): _a = value lowerCAmelCase_ : List[Any] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCAmelCase_ : Optional[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( 'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , lowerCamelCase_ , ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : str , __a : Optional[Any] ): super().__init__(__a ) _a = config _a = PoolFormerEncoder(__a ) # Initialize weights and apply final processing self.post_init() def UpperCamelCase__ ( self : str ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase__ ( self : Union[str, Any] , __a : Optional[torch.FloatTensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ): _a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _a = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) _a = self.encoder( __a , output_hidden_states=__a , return_dict=__a , ) _a = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__a , hidden_states=encoder_outputs.hidden_states , ) class __SCREAMING_SNAKE_CASE (nn.Module ): """simple docstring""" def __init__( self : Any , __a : int ): super().__init__() _a = nn.Linear(config.hidden_size , config.hidden_size ) def UpperCamelCase__ ( self : Optional[int] , __a : Optional[int] ): _a = self.dense(__a ) return output @add_start_docstrings( '\n PoolFormer Model transformer with an image classification head on top\n ' , lowerCamelCase_ , ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Dict , __a : Optional[Any] ): super().__init__(__a ) _a = config.num_labels _a = PoolFormerModel(__a ) # Final norm _a = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head _a = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # 
Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase__ ( self : Any , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.LongTensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ): _a = return_dict if return_dict is not None else self.config.use_return_dict _a = self.poolformer( __a , output_hidden_states=__a , return_dict=__a , ) _a = outputs[0] _a = self.classifier(self.norm(__a ).mean([-2, -1] ) ) _a = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _a = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _a = "single_label_classification" else: _a = "multi_label_classification" if self.config.problem_type == "regression": _a = MSELoss() if self.num_labels == 1: _a = loss_fct(logits.squeeze() , labels.squeeze() ) else: _a = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": _a = CrossEntropyLoss() _a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _a = BCEWithLogitsLoss() _a = loss_fct(__a , __a ) if not return_dict: _a = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
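# Hedged inference sketch for the classification head above (assumes network access
# for the "sail/poolformer_s12" checkpoint and the COCO sample image).
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # per the docstring above: "tabby, tabby cat"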
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any: model.train() _a = model(lowercase ) _a = F.mse_loss(lowercase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase ) def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]: set_seed(42 ) _a = RegressionModel() _a = deepcopy(lowercase ) _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) model.to(accelerator.device ) if sched: _a = AdamW(params=model.parameters() , lr=1E-3 ) _a = AdamW(params=ddp_model.parameters() , lr=1E-3 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) # Make a copy of `model` if sched: _a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase ) else: _a , _a = accelerator.prepare(lowercase , lowercase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]: # Test when on a single CPU or GPU that the context manager does nothing _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase , lowercase , lowercase , lowercase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad 
({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : Tuple ) -> Tuple: # Test on distributed setup that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] GradientState._reset_state() def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , 
gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' _a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase )) if accelerator.num_processes > 1: check_model_parameters(lowercase , lowercase , lowercase , lowercase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def _lowerCamelCase ( ) -> Any: _a = Accelerator() _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) _a = RegressionDataset(length=96 ) _a = DataLoader(lowercase , batch_size=16 ) _a , _a = accelerator.prepare(lowercase , lowercase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if iteration < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if batch_num < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _lowerCamelCase ( ) -> Optional[Any]: _a = Accelerator() _a = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(lowercase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(lowercase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(lowercase , lowercase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( 
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase ) def _lowerCamelCase ( lowercase : Any ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
63
1
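# A minimal, self-contained sketch of the `accelerator.accumulate(...)` pattern the
# test script above exercises. The model, optimizer, and dataloader here are
# illustrative placeholders, not part of the test file itself.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = TensorDataset(torch.randn(8, 1), torch.randn(8, 1))
dataloader = DataLoader(dataset, batch_size=2)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Gradients are synchronized and the optimizer effectively steps only every
    # `gradient_accumulation_steps` batches; in between, syncing is skipped.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()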
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowerCAmelCase_ : str = logging.get_logger(__name__) lowerCAmelCase_ : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear', 'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed', 'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } lowerCAmelCase_ : Optional[int] = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : str ) -> int: for attribute in key.split("." ): _a = getattr(lowercase , lowercase ) if weight_type is not None: _a = getattr(lowercase , lowercase ).shape else: _a = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value else: _a = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def _lowerCamelCase ( lowercase : int , lowercase : str ) -> Optional[Any]: _a = [] _a = fairseq_model.state_dict() _a = hf_model.feature_extractor for name, value in fairseq_dict.items(): _a = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == "group" , ) _a = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _a = True if "*" in mapped_key: _a = name.split(lowercase )[0].split("." 
)[-2] _a = mapped_key.replace("*" , lowercase ) if "weight_g" in name: _a = "weight_g" elif "weight_v" in name: _a = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: _a = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _a = "weight" else: _a = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(F'Unused weights: {unused_weights}' ) def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : Dict ) -> Any: _a = full_name.split("conv_layers." )[-1] _a = name.split("." ) _a = int(items[0] ) _a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _a = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _a = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) _a = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) _a = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(lowercase ) @torch.no_grad() def _lowerCamelCase ( lowercase : int , lowercase : str , lowercase : List[Any]=None ) -> Union[str, Any]: # load the pre-trained checkpoints _a = torch.load(lowercase ) _a = WavLMConfigOrig(checkpoint["cfg"] ) _a = WavLMOrig(lowercase ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: _a = WavLMConfig.from_pretrained(lowercase ) else: _a = WavLMConfig() _a = WavLMModel(lowercase ) recursively_load_weights(lowercase , lowercase ) hf_wavlm.save_pretrained(lowercase ) if __name__ == "__main__": lowerCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') lowerCAmelCase_ : List[Any] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
63
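# A small sketch of the dotted-attribute traversal that `set_recursively` in the
# conversion script above relies on: each segment of a key such as
# "encoder.layers.0.attention" is resolved with `getattr` before the tensor is
# assigned. The toy module and key below are illustrative, not from the script.
import torch

model = torch.nn.Module()
model.proj = torch.nn.Linear(2, 2)

key, value = "proj.weight", torch.zeros(2, 2)
pointer = model
for attribute in key.split("."):
    pointer = getattr(pointer, attribute)
pointer.data = value  # the converter additionally asserts the shapes match
assert torch.equal(model.proj.weight.data, value)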
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase_ : List[str] = { 'microsoft/trocr-base-handwritten': ( 'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='trocr' __a =['past_key_values'] __a ={ 'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'decoder_layers', } def __init__( self : Optional[int] , __a : Any=5_02_65 , __a : Optional[int]=10_24 , __a : List[Any]=12 , __a : str=16 , __a : int=40_96 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=5_12 , __a : Dict=0.1 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Any=2 , __a : Union[str, Any]=0.02 , __a : Any=0.0 , __a : List[str]=True , __a : Optional[Any]=False , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : Any=1 , __a : List[Any]=0 , __a : Any=2 , **__a : Optional[Any] , ): _a = vocab_size _a = d_model _a = decoder_layers _a = decoder_attention_heads _a = decoder_ffn_dim _a = activation_function _a = max_position_embeddings _a = dropout _a = attention_dropout _a = activation_dropout _a = init_std _a = decoder_layerdrop _a = use_cache _a = scale_embedding _a = use_learned_position_embeddings _a = layernorm_embedding super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
63
1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , __a : Dict , __a : List[str]=7 , __a : int=3 , __a : Optional[int]=30 , __a : Optional[Any]=4_00 , __a : List[str]=True , __a : Tuple=None , __a : Optional[int]=True , __a : Dict=1 / 2_55 , __a : Any=True , __a : Optional[int]=[0.5, 0.5, 0.5] , __a : Any=[0.5, 0.5, 0.5] , __a : Any=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33} _a = parent _a = batch_size _a = num_channels _a = min_resolution _a = max_resolution _a = do_resize _a = size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean _a = image_std _a = do_pad def UpperCamelCase__ ( self : Optional[int] ): return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def UpperCamelCase__ ( self : List[str] , __a : List[Any] , __a : List[str]=False ): if not batched: _a = image_inputs[0] if isinstance(__a , Image.Image ): _a , _a = image.size else: _a , _a = image.shape[1], image.shape[2] if w < h: _a = int(self.size["shortest_edge"] * h / w ) _a = self.size["shortest_edge"] elif w > h: _a = self.size["shortest_edge"] _a = int(self.size["shortest_edge"] * w / h ) else: _a = self.size["shortest_edge"] _a = self.size["shortest_edge"] else: _a = [] for image in image_inputs: _a , _a = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _a = max(__a , key=lambda __a : item[0] )[0] _a = max(__a , key=lambda __a : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =DetrImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self : Optional[Any] ): _a = DetrImageProcessingTester(self ) @property def UpperCamelCase__ ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self : Dict ): _a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , "image_mean" ) ) self.assertTrue(hasattr(__a , "image_std" ) ) self.assertTrue(hasattr(__a , "do_normalize" ) ) self.assertTrue(hasattr(__a , "do_rescale" ) ) self.assertTrue(hasattr(__a , "rescale_factor" ) ) self.assertTrue(hasattr(__a , "do_resize" ) ) self.assertTrue(hasattr(__a , "size" ) ) self.assertTrue(hasattr(__a , "do_pad" ) ) def UpperCamelCase__ ( self : Tuple ): _a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} ) self.assertEqual(image_processor.do_pad , __a ) _a = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__a ) self.assertEqual(image_processor.size , 
{"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , __a ) def UpperCamelCase__ ( self : Dict ): pass def UpperCamelCase__ ( self : Union[str, Any] ): # Initialize image_processing _a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input _a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _a , _a = self.image_processor_tester.get_expected_values(__a , batched=__a ) _a = image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase__ ( self : int ): # Initialize image_processing _a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input _a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _a = image_processing(__a , return_tensors="pt" ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__a , batched=__a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase__ ( self : List[str] ): # Initialize image_processing _a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input _a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _a = image_processing(__a , return_tensors="pt" ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__a , batched=__a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCamelCase__ ( self : List[Any] ): # prepare image and target _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: _a = json.loads(f.read() ) _a = {"image_id": 3_97_69, "annotations": target} # encode them _a = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" ) _a = image_processing(images=__a , annotations=__a , return_tensors="pt" ) # verify pixel values _a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __a ) _a = torch.tensor([0.2796, 0.3138, 0.3481] 
) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __a , atol=1e-4 ) ) # verify area _a = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __a ) ) # verify boxes _a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __a ) _a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __a , atol=1e-3 ) ) # verify image_id _a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __a ) ) # verify is_crowd _a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __a ) ) # verify class_labels _a = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __a ) ) # verify orig_size _a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __a ) ) # verify size _a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __a ) ) @slow def UpperCamelCase__ ( self : List[str] ): # prepare image, target and masks_path _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: _a = json.loads(f.read() ) _a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target} _a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them _a = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" ) _a = image_processing(images=__a , annotations=__a , masks_path=__a , return_tensors="pt" ) # verify pixel values _a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __a ) _a = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __a , atol=1e-4 ) ) # verify area _a = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __a ) ) # verify boxes _a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __a ) _a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __a , atol=1e-3 ) ) # verify image_id _a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __a ) ) # verify is_crowd _a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __a ) ) # verify class_labels _a = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __a ) ) # verify masks _a = 82_28_73 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __a ) # verify orig_size _a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __a ) ) # verify size _a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __a ) )
63
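# A minimal sketch of the aspect-ratio-preserving resize that `get_expected_values`
# in the test above mirrors: the shorter image side is scaled to `shortest_edge`
# and the longer side follows proportionally. Numbers are purely illustrative,
# and this returns (width, height) rather than the test's (height, width).
def expected_size(width: int, height: int, shortest_edge: int = 18) -> tuple[int, int]:
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if width > height:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge

assert expected_size(30, 60) == (18, 36)
assert expected_size(60, 30) == (36, 18)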
'''simple docstring''' import argparse import os import re lowerCAmelCase_ : Any = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict lowerCAmelCase_ : List[str] = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings lowerCAmelCase_ : Tuple = re.compile(R'\s*\(\s*"(\S[^"]+)"') def _lowerCamelCase ( lowercase : Any , lowercase : bool = False ) -> Optional[Any]: with open(lowercase , "r" , encoding="utf-8" ) as f: _a = f.read() _a = content.split("\n" ) _a = [] _a = 0 while line_idx < len(lowercase ): if _re_intro_mapping.search(lines[line_idx] ) is not None: _a = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 _a = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": _a = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers _a = sorted(lowercase , key=lambda lowercase : _re_identifier.search(lowercase ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(lowercase , "w" , encoding="utf-8" ) as f: f.write("\n".join(lowercase ) ) elif "\n".join(lowercase ) != content: return True def _lowerCamelCase ( lowercase : bool = False ) -> List[str]: _a = [os.path.join(lowercase , lowercase ) for f in os.listdir(lowercase ) if f.endswith(".py" )] _a = [sort_auto_mapping(lowercase , overwrite=lowercase ) for fname in fnames] if not overwrite and any(lowercase ): _a = [f for f, d in zip(lowercase , lowercase ) if d] raise ValueError( F'The following files have auto mappings that need sorting: {", ".join(lowercase )}. Run `make style` to fix' " this." ) if __name__ == "__main__": lowerCAmelCase_ : Any = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') lowerCAmelCase_ : Optional[int] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
63
1
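# A toy sketch of the sort key used by `sort_auto_mapping` above: mapping entries
# are ordered by the first quoted identifier that `_re_identifier` extracts from
# each block. The two sample blocks are illustrative.
import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
blocks = ['        ("swin", "SwinConfig"),', '        ("bert", "BertConfig"),']
blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
assert [_re_identifier.search(b).groups()[0] for b in blocks] == ["bert", "swin"]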
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" @parameterized.expand([(None,), ("foo.json",)] ) def UpperCamelCase__ ( self : Tuple , __a : Optional[Any] ): _a = GenerationConfig( do_sample=__a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__a , config_name=__a ) _a = GenerationConfig.from_pretrained(__a , config_name=__a ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __a ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , __a ) def UpperCamelCase__ ( self : Union[str, Any] ): _a = AutoConfig.from_pretrained("gpt2" ) _a = GenerationConfig.from_model_config(__a ) _a = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__a , __a ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def UpperCamelCase__ ( self : int ): _a = GenerationConfig() _a = { "max_new_tokens": 10_24, "foo": "bar", } _a = copy.deepcopy(__a ) _a = generation_config.update(**__a ) # update_kwargs was not modified (no side effects) self.assertEqual(__a , __a ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 10_24 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__a , {"foo": "bar"} ) def UpperCamelCase__ ( self : Dict ): _a = GenerationConfig() _a = "bar" with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir: generation_config.save_pretrained(__a ) _a = GenerationConfig.from_pretrained(__a ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar" ) _a = GenerationConfig.from_model_config(__a ) assert not hasattr(__a , "foo" ) # no new kwargs should be initialized if from config def UpperCamelCase__ ( self : Dict ): _a = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , __a ) self.assertEqual(default_config.num_beams , 1 ) _a = GenerationConfig( do_sample=__a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , __a ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__a ) _a = GenerationConfig.from_pretrained(__a , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , __a ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple 
docstring""" @classmethod def UpperCamelCase__ ( cls : Union[str, Any] ): _a = TOKEN HfFolder.save_token(__a ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] ): try: delete_repo(token=cls._token , repo_id="test-generation-config" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" ) except HTTPError: pass def UpperCamelCase__ ( self : Optional[Any] ): _a = GenerationConfig( do_sample=__a , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token ) _a = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__a , getattr(__a , __a ) ) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __a , repo_id="test-generation-config" , push_to_hub=__a , use_auth_token=self._token ) _a = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__a , getattr(__a , __a ) ) def UpperCamelCase__ ( self : int ): _a = GenerationConfig( do_sample=__a , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token ) _a = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__a , getattr(__a , __a ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __a , repo_id="valid_org/test-generation-config-org" , push_to_hub=__a , use_auth_token=self._token ) _a = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__a , getattr(__a , __a ) )
63
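# A minimal sketch of the save/load round trip the tests above check, including the
# custom `config_name` variant. Requires `transformers`; the parameter values are
# arbitrary.
import tempfile
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir, config_name="foo.json")
    loaded = GenerationConfig.from_pretrained(tmp_dir, config_name="foo.json")
assert loaded.temperature == 0.7 and loaded.do_sample is True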
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ : int = logging.get_logger(__name__) lowerCAmelCase_ : Tuple = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='big_bird' def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ): super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , ) _a = vocab_size _a = max_position_embeddings _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = initializer_range _a = type_vocab_size _a = layer_norm_eps _a = use_cache _a = rescale_embeddings _a = attention_type _a = use_bias _a = block_size _a = num_random_blocks _a = classifier_dropout class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" @property def UpperCamelCase__ ( self : Optional[int] ): if self.task == "multiple-choice": _a = {0: "batch", 1: "choice", 2: "sequence"} else: _a = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
63
1
'''simple docstring''' from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
63
'''simple docstring''' import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" @register_to_config def __init__( self : Dict , *, __a : int = 4 , __a : int = 7_68 , __a : int , __a : int , ): super().__init__() _a = nn.Parameter(torch.zeros(__a ) ) # parameters for additional clip time embeddings _a = nn.Linear(__a , __a ) _a = nn.Linear(__a , __a ) # parameters for encoder hidden states _a = clip_extra_context_tokens _a = nn.Linear( __a , self.clip_extra_context_tokens * cross_attention_dim ) _a = nn.Linear(__a , __a ) _a = nn.LayerNorm(__a ) def UpperCamelCase__ ( self : Optional[Any] , *, __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any] ): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings _a = image_embeddings.shape[0] _a = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) _a = classifier_free_guidance_embeddings.expand( __a , -1 ) _a = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] _a = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... _a = self.embedding_proj(__a ) _a = self.clip_image_embeddings_project_to_time_embeddings(__a ) _a = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" _a = self.clip_extra_context_tokens_proj(__a ) _a = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens ) _a = clip_extra_context_tokens.permute(0 , 2 , 1 ) _a = self.encoder_hidden_states_proj(__a ) _a = self.text_encoder_hidden_states_norm(__a ) _a = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
63
1
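# A standalone sketch of the classifier-free-guidance step in the `forward` above:
# a single learned "null" image embedding is expanded to the batch size and stacked
# in front of the real image embeddings. Shapes and the zero placeholder for the
# learned nn.Parameter are illustrative.
import torch

batch, dim = 2, 768
image_embeddings = torch.randn(batch, dim)
learned_null = torch.zeros(dim)  # stands in for the learned guidance embedding

null_embeddings = learned_null.unsqueeze(0).expand(batch, -1)
combined = torch.cat([null_embeddings, image_embeddings], dim=0)
assert combined.shape == (2 * batch, dim)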
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : str = logging.get_logger(__name__) lowerCAmelCase_ : Tuple = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='speech_to_text' __a =['past_key_values'] __a ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , __a : Dict=1_00_00 , __a : Any=12 , __a : Optional[Any]=20_48 , __a : List[str]=4 , __a : Any=6 , __a : str=20_48 , __a : Optional[Any]=4 , __a : Tuple=0.0 , __a : str=0.0 , __a : int=True , __a : Union[str, Any]=True , __a : Any="relu" , __a : Optional[int]=2_56 , __a : Optional[Any]=0.1 , __a : Optional[int]=0.0 , __a : List[Any]=0.0 , __a : Dict=0.02 , __a : Any=2 , __a : Union[str, Any]=True , __a : Optional[Any]=1 , __a : Dict=0 , __a : Optional[int]=2 , __a : Dict=60_00 , __a : Dict=10_24 , __a : Optional[int]=2 , __a : int=(5, 5) , __a : int=10_24 , __a : str=80 , __a : Any=1 , **__a : Dict , ): _a = vocab_size _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = encoder_layerdrop _a = decoder_layerdrop _a = use_cache _a = encoder_layers _a = scale_embedding # scale factor will be sqrt(d_model) if True _a = max_source_positions _a = max_target_positions _a = num_conv_layers _a = list(__a ) _a = conv_channels _a = input_feat_per_channel _a = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` " f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ' f'`config.num_conv_layers = {self.num_conv_layers}`.' ) super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , **__a , )
63
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _lowerCamelCase ( lowercase : Dict ) -> Any: _a = filter(lambda lowercase : p.requires_grad , model.parameters() ) _a = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_ : int = logging.getLogger(__name__) def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any ) -> Any: if metric == "rouge2": _a = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": _a = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": _a = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' " function." ) _a = ModelCheckpoint( dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Optional[int] ) -> Union[str, Any]: return EarlyStopping( monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , ) class __SCREAMING_SNAKE_CASE (pl.Callback ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): _a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__a ) @rank_zero_only def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Tuple=True ): logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' ) _a = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results _a = Path(pl_module.hparams.output_dir ) if type_path == "test": _a = od / "test_results.txt" _a = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt' _a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=__a ) generations_file.parent.mkdir(exist_ok=__a ) with open(__a , "a+" ) as writer: for key in sorted(__a ): if key in ["log", "progress_bar", "preds"]: continue _a = metrics[key] if isinstance(__a , torch.Tensor ): _a = val.item() _a = f'{key}: {val:.6f}\n' writer.write(__a ) if not save_generations: return if "preds" in metrics: _a = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__a ) @rank_zero_only def UpperCamelCase__ ( self : int , __a : List[Any] , __a : Union[str, Any] ): try: _a = pl_module.model.model.num_parameters() except AttributeError: _a = pl_module.model.num_parameters() _a = count_trainable_parameters(__a ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} ) @rank_zero_only def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__a , __a , "test" ) @rank_zero_only def UpperCamelCase__ ( self : Any , __a : pl.Trainer , __a : int ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
63
1
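# A minimal sketch of the trainable-parameter count computed by
# `count_trainable_parameters` above, applied to a throwaway model.
import numpy as np
import torch

model = torch.nn.Linear(10, 5)  # 10 * 5 weights + 5 biases
trainable = filter(lambda p: p.requires_grad, model.parameters())
n_params = sum(np.prod(p.size()) for p in trainable)
assert n_params == 55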
'''simple docstring''' from __future__ import annotations from typing import TypedDict class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =42 __a =42 def _lowerCamelCase ( lowercase : str ) -> list[str]: if not isinstance(lowercase , lowercase ): raise TypeError("The parameter s type must be str." ) return [s[i:] + s[:i] for i in range(len(lowercase ) )] def _lowerCamelCase ( lowercase : str ) -> BWTTransformDict: if not isinstance(lowercase , lowercase ): raise TypeError("The parameter s type must be str." ) if not s: raise ValueError("The parameter s must not be empty." ) _a = all_rotations(lowercase ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation _a = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(lowercase ), } return response def _lowerCamelCase ( lowercase : str , lowercase : int ) -> str: if not isinstance(lowercase , lowercase ): raise TypeError("The parameter bwt_string type must be str." ) if not bwt_string: raise ValueError("The parameter bwt_string must not be empty." ) try: _a = int(lowercase ) except ValueError: raise TypeError( "The parameter idx_original_string type must be int or passive" " of cast to int." ) if idx_original_string < 0: raise ValueError("The parameter idx_original_string must not be lower than 0." ) if idx_original_string >= len(lowercase ): raise ValueError( "The parameter idx_original_string must be lower than" " len(bwt_string)." ) _a = [""] * len(lowercase ) for _ in range(len(lowercase ) ): for i in range(len(lowercase ) ): _a = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowerCAmelCase_ : str = 'Provide a string that I will generate its BWT transform: ' lowerCAmelCase_ : List[str] = input(entry_msg).strip() lowerCAmelCase_ : List[str] = bwt_transform(s) print( f"""Burrows Wheeler transform for string '{s}' results """ f"""in '{result['bwt_string']}'""" ) lowerCAmelCase_ : Optional[Any] = reverse_bwt(result['bwt_string'], result['idx_original_string']) print( f"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """ f"""we get original string '{original_string}'""" )
63
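# A worked example for the transform implemented above, assuming it runs in the same
# module as `bwt_transform` and `reverse_bwt`. For "banana" the six rotations, once
# sorted, end with the characters "nnbaaa", and the original string sits at index 3
# (this matches the sentinel-free construction used here).
result = bwt_transform("banana")
assert result["bwt_string"] == "nnbaaa"
assert result["idx_original_string"] == 3
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "banana"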
'''simple docstring''' import math class __SCREAMING_SNAKE_CASE : """simple docstring""" def UpperCamelCase__ ( self : List[str] , __a : list[list[float]] , __a : list[int] ): _a = 0.0 _a = 0.0 for i in range(len(__a ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def UpperCamelCase__ ( self : List[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ): for i in range(len(__a ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def _lowerCamelCase ( ) -> None: # Training Examples ( m, n ) _a = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _a = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _a = SelfOrganizingMap() _a = 3 _a = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _a = training_samples[j] # Compute the winning vector _a = self_organizing_map.get_winner(lowercase , lowercase ) # Update the winning vector _a = self_organizing_map.update(lowercase , lowercase , lowercase , lowercase ) # classify test sample _a = [0, 0, 0, 1] _a = self_organizing_map.get_winner(lowercase , lowercase ) # results print(F'Clusters that the test sample belongs to : {winner}' ) print(F'Weights that have been trained : {weights}' ) # running the main() function if __name__ == "__main__": main()
63
1
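# A tiny standalone sketch of the winner-take-all idea behind `get_winner` above:
# the cluster whose weight vector has the smaller squared distance to the sample
# is selected. Sample and weights reuse the values from `main()`.
import math

sample = [0, 0, 0, 1]
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

distances = [
    sum(math.pow(sample[i] - w[i], 2) for i in range(len(sample))) for w in weights
]
winner = 0 if distances[0] < distances[1] else 1
assert winner == 0  # 0.66 < 1.78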
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase_ : Optional[Any] = { 'microsoft/swin-tiny-patch4-window7-224': ( 'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json' ), # See all Swin models at https://huggingface.co/models?filter=swin } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" __a ='swin' __a ={ 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , __a : Optional[Any]=2_24 , __a : List[str]=4 , __a : List[Any]=3 , __a : Optional[Any]=96 , __a : str=[2, 2, 6, 2] , __a : Tuple=[3, 6, 12, 24] , __a : List[str]=7 , __a : Tuple=4.0 , __a : Optional[Any]=True , __a : Optional[Any]=0.0 , __a : Any=0.0 , __a : Tuple=0.1 , __a : Tuple="gelu" , __a : Union[str, Any]=False , __a : Optional[int]=0.02 , __a : Tuple=1e-5 , __a : List[str]=32 , __a : int=None , __a : Dict=None , **__a : Any , ): super().__init__(**__a ) _a = image_size _a = patch_size _a = num_channels _a = embed_dim _a = depths _a = len(__a ) _a = num_heads _a = window_size _a = mlp_ratio _a = qkv_bias _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = drop_path_rate _a = hidden_act _a = use_absolute_embeddings _a = layer_norm_eps _a = initializer_range _a = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _a = int(embed_dim * 2 ** (len(__a ) - 1) ) _a = ["stem"] + [f'stage{idx}' for idx in range(1 , len(__a ) + 1 )] _a , _a = get_aligned_output_features_output_indices( out_features=__a , out_indices=__a , stage_names=self.stage_names ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =version.parse('1.11' ) @property def UpperCamelCase__ ( self : List[str] ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase__ ( self : str ): return 1e-4
63
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='OwlViTImageProcessor' __a =('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )): _a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )] elif isinstance(__a , __a ) and isinstance(text[0] , __a ): _a = [] # Maximum number of queries across batch _a = max([len(__a ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__a ) != max_num_queries: _a = t + [" "] * (max_num_queries - len(__a )) _a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a ) encodings.append(__a ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": _a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) _a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) _a = BatchEncoding() _a = input_ids _a = attention_mask if query_images is not None: _a = BatchEncoding() _a = self.image_processor( __a , return_tensors=__a , **__a ).pixel_values _a = query_pixel_values if images is not None: _a = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None and images is not None: _a = image_features.pixel_values return encoding elif query_images is not None and images is not None: _a = image_features.pixel_values return encoding elif text is not None or query_images is not None: return 
encoding else: return BatchEncoding(data=dict(**__a ) , tensor_type=__a ) def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ): return self.image_processor.post_process(*__a , **__a ) def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ): return self.image_processor.post_process_object_detection(*__a , **__a ) def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ): return self.image_processor.post_process_image_guided_detection(*__a , **__a ) def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ): return self.tokenizer.batch_decode(*__a , **__a ) def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ): return self.tokenizer.decode(*__a , **__a ) @property def UpperCamelCase__ ( self : List[str] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def UpperCamelCase__ ( self : str ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
63
1
'''simple docstring''' import math def _lowerCamelCase ( lowercase : int = 100 ) -> int: _a = sum(i * i for i in range(1 , n + 1 ) ) _a = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f"""{solution() = }""")
63
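# A worked check of the formula above for n = 10: the sum of squares is
# 1^2 + ... + 10^2 = 385, the square of the sum is (1 + ... + 10)^2 = 55^2 = 3025,
# and their difference is 2640.
n = 10
sum_of_squares = sum(i * i for i in range(1, n + 1))  # 385
square_of_sum = sum(range(1, n + 1)) ** 2             # 3025
assert square_of_sum - sum_of_squares == 2640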
'''simple docstring''' def _lowerCamelCase ( lowercase : str ) -> list: if n_term == "": return [] _a = [] for temp in range(int(lowercase ) ): series.append(F'1/{temp + 1}' if series else "1" ) return series if __name__ == "__main__": lowerCAmelCase_ : Union[str, Any] = input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
63
1
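# A short usage example for the function above, assuming it runs in the same module
# as `harmonic_series`: the first five terms of the series.
assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]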
'''simple docstring''' from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Tuple , *__a : Tuple , **__a : Dict ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Any , **__a : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : List[str] , **__a : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : List[Any] , **__a : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : Any , **__a : Dict ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[Any] , *__a : Dict , **__a : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : Optional[int] , **__a : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : Any , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Any , *__a : List[Any] , **__a : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[int] , *__a : List[str] , **__a : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : Dict , **__a : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[str] , *__a : str , **__a : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : int , *__a : Optional[int] , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[Any] , *__a : Dict , **__a : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[str] , *__a : Optional[int] , **__a : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : Tuple , **__a : Optional[int] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : int , *__a : int , **__a : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Tuple , **__a : Optional[int] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Dict , **__a : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[Any] , *__a : List[Any] , **__a : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ): requires_backends(cls , ["torch"] ) 
@classmethod def UpperCamelCase__ ( cls : Tuple , *__a : Any , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[Any] , *__a : Tuple , **__a : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Any , *__a : Any , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[Any] , *__a : Tuple , **__a : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Tuple , *__a : str , **__a : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : List[Any] , **__a : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : str , *__a : str , **__a : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Tuple , *__a : Optional[int] , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : Union[str, Any] , **__a : str ): requires_backends(cls , ["torch"] ) def _lowerCamelCase ( *lowercase : List[Any] , **lowercase : Dict ) -> List[str]: requires_backends(lowercase , ["torch"] ) def _lowerCamelCase ( *lowercase : Optional[Any] , **lowercase : Optional[Any] ) -> List[Any]: requires_backends(lowercase , ["torch"] ) def _lowerCamelCase ( *lowercase : List[Any] , **lowercase : Optional[int] ) -> int: requires_backends(lowercase , ["torch"] ) def _lowerCamelCase ( *lowercase : str , **lowercase : Any ) -> Optional[Any]: requires_backends(lowercase , ["torch"] ) def _lowerCamelCase ( *lowercase : int , **lowercase : Any ) -> int: requires_backends(lowercase , ["torch"] ) def _lowerCamelCase ( *lowercase : List[str] , **lowercase : Optional[int] ) -> int: requires_backends(lowercase , ["torch"] ) def _lowerCamelCase ( *lowercase : List[Any] , **lowercase : List[str] ) -> List[str]: requires_backends(lowercase , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : str , *__a : int , **__a : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[str] , *__a : List[Any] , **__a : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Tuple , *__a : Optional[Any] , **__a : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[int] , *__a : Dict , **__a : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : Any , **__a : str ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Tuple , *__a : int , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Union[str, Any] , *__a : Any , **__a : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : Tuple , **__a : Dict ): requires_backends(cls , ["torch"] ) @classmethod 
def UpperCamelCase__ ( cls : Dict , *__a : List[str] , **__a : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[Any] , *__a : Optional[Any] , **__a : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Any , *__a : Optional[int] , **__a : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Any , **__a : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : int , *__a : int , **__a : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[int] , *__a : List[Any] , **__a : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : str ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Tuple , *__a : str , **__a : List[str] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[str] , *__a : int , **__a : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : int , *__a : List[Any] , **__a : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Union[str, Any] , *__a : Tuple , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[str] , *__a : Tuple , **__a : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[str] , *__a : int , **__a : Dict ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[Any] , *__a : int , **__a : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Optional[int] , **__a : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Dict , *__a : Any , **__a : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : int , *__a : Any , **__a : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[int] , *__a : Optional[int] , **__a : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def 
UpperCamelCase__ ( cls : List[Any] , *__a : Optional[Any] , **__a : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Any , *__a : int , **__a : Dict ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[str] , *__a : Tuple , **__a : Tuple ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : Dict , **__a : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Any , *__a : str , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[Any] , *__a : Optional[Any] , **__a : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Any , *__a : Tuple , **__a : List[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Union[str, Any] , *__a : Tuple , **__a : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : int , **__a : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : Optional[Any] , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[str] , *__a : str , **__a : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : Any , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : Tuple , **__a : List[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Tuple , *__a : List[Any] , **__a : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : int , *__a : int , **__a : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : int , *__a : List[Any] , **__a : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Tuple , *__a : Optional[int] , **__a : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : int , *__a : int , **__a : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[Any] , *__a : Tuple , **__a : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[int] , *__a : Any , **__a : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : Any , **__a : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Union[str, Any] , *__a : Optional[Any] , **__a : Optional[Any] ): 
requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Dict , **__a : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : int , *__a : Any , **__a : List[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Dict , *__a : Union[str, Any] , **__a : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : int , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : int , *__a : str , **__a : Dict ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[Any] , *__a : Tuple , **__a : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : Union[str, Any] , **__a : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[int] , *__a : Dict , **__a : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : List[str] , **__a : List[str] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : int , *__a : int , **__a : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[int] , *__a : Optional[int] , **__a : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : Optional[Any] , **__a : Tuple ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[Any] , *__a : List[str] , **__a : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : List[str] , **__a : Optional[int] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : Tuple , **__a : List[str] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[Any] , *__a : Any , **__a : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : str , **__a : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[str] , *__a : Tuple , **__a : List[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Dict , *__a : List[str] , **__a : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : int , **__a : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Any , **__a : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : 
Optional[Any] , *__a : Dict , **__a : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : Dict , **__a : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : List[str] , **__a : Tuple ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[str] , *__a : int , **__a : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Union[str, Any] , *__a : Optional[int] , **__a : Optional[int] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : List[Any] , **__a : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Tuple , *__a : List[str] , **__a : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[str] , *__a : Optional[int] , **__a : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Any , *__a : List[Any] , **__a : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : Optional[Any] , **__a : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : Optional[int] , **__a : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : str , *__a : Tuple , **__a : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : Tuple , **__a : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : List[str] , **__a : List[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Any , *__a : List[Any] , **__a : Tuple ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[int] , *__a : List[Any] , **__a : Tuple ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Union[str, Any] , *__a : Union[str, Any] , **__a : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[int] , *__a : Union[str, Any] , **__a : str ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Optional[Any] , *__a : 
Tuple , **__a : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : Union[str, Any] , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Union[str, Any] , *__a : Optional[int] , **__a : Dict ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Union[str, Any] , *__a : int , **__a : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : str , **__a : str ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[Any] , *__a : Optional[int] , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Tuple , *__a : str , **__a : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : str , **__a : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Union[str, Any] , *__a : Tuple , **__a : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : Tuple , *__a : str , **__a : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Union[str, Any] , *__a : str , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : str , *__a : str , **__a : Optional[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ): """simple docstring""" __a =['torch'] def __init__( self : List[Any] , *__a : Tuple , **__a : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : Dict , *__a : Union[str, Any] , **__a : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCamelCase__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ): requires_backends(cls , ["torch"] )
63
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) lowerCAmelCase_ : List[str] = logging.getLogger(__name__) lowerCAmelCase_ : List[Any] = tf.data.AUTOTUNE def _lowerCamelCase ( ) -> Optional[int]: _a = argparse.ArgumentParser(description="Train a masked language model on TPU." ) parser.add_argument( "--pretrained_model_config" , type=lowercase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , ) parser.add_argument( "--tokenizer" , type=lowercase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , ) parser.add_argument( "--per_replica_batch_size" , type=lowercase , default=8 , help="Batch size per TPU core." , ) parser.add_argument( "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , ) parser.add_argument( "--tpu_name" , type=lowercase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , ) parser.add_argument( "--tpu_zone" , type=lowercase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , ) parser.add_argument( "--gcp_project" , type=lowercase , help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , ) parser.add_argument( "--train_dataset" , type=lowercase , help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--shuffle_buffer_size" , type=lowercase , default=2**18 , help="Size of the shuffle buffer (in samples)" , ) parser.add_argument( "--eval_dataset" , type=lowercase , help="Path to evaluation dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--num_epochs" , type=lowercase , default=1 , help="Number of epochs to train for." , ) parser.add_argument( "--learning_rate" , type=lowercase , default=1E-4 , help="Learning rate to use for training." , ) parser.add_argument( "--weight_decay_rate" , type=lowercase , default=1E-3 , help="Weight decay rate to use for training." , ) parser.add_argument( "--max_length" , type=lowercase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , ) parser.add_argument( "--mlm_probability" , type=lowercase , default=0.15 , help="Fraction of tokens to mask during training." , ) parser.add_argument("--output_dir" , type=lowercase , required=lowercase , help="Path to save model checkpoints to." ) parser.add_argument("--hub_model_id" , type=lowercase , help="Model ID to upload to on the Hugging Face Hub." 
) _a = parser.parse_args() return args def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Optional[int]: try: if args.tpu_name: _a = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: _a = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." ) tf.config.experimental_connect_to_cluster(lowercase ) tf.tpu.experimental.initialize_tpu_system(lowercase ) return tpu def _lowerCamelCase ( lowercase : List[str] ) -> Any: _a = 0 for file in file_list: _a = file.split("/" )[-1] _a = re.search(r"-\d+-(\d+)\.tfrecord" , lowercase ).group(1 ) _a = int(lowercase ) num_samples += sample_count return num_samples def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Tuple , lowercase : List[str] , lowercase : Any , lowercase : Tuple , lowercase : Optional[int]=None ) -> int: _a = count_samples(lowercase ) _a = tf.data.Dataset.from_tensor_slices(lowercase ) if shuffle: _a = dataset.shuffle(len(lowercase ) ) _a = tf.data.TFRecordDataset(lowercase , num_parallel_reads=lowercase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here _a = dataset.apply(tf.data.experimental.assert_cardinality(lowercase ) ) _a = dataset.map(lowercase , num_parallel_calls=lowercase ) if shuffle: assert shuffle_buffer_size is not None _a = dataset.shuffle(args.shuffle_buffer_size ) _a = dataset.batch(lowercase , drop_remainder=lowercase ) _a = dataset.map(lowercase , num_parallel_calls=lowercase ) _a = dataset.prefetch(lowercase ) return dataset def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict: if not args.no_tpu: _a = initialize_tpu(lowercase ) _a = tf.distribute.TPUStrategy(lowercase ) else: _a = tf.distribute.OneDeviceStrategy(device="/gpu:0" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" ) _a = AutoTokenizer.from_pretrained(args.tokenizer ) _a = AutoConfig.from_pretrained(args.pretrained_model_config ) _a = tokenizer.vocab_size _a = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) ) if not training_records: raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' ) _a = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) ) if not eval_records: raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' ) _a = count_samples(lowercase ) _a = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) _a = steps_per_epoch * args.num_epochs with strategy.scope(): _a = TFAutoModelForMaskedLM.from_config(lowercase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built _a , _a = create_optimizer( num_train_steps=lowercase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). 
model.compile(optimizer=lowercase , metrics=["accuracy"] ) def decode_fn(lowercase : int ): _a = { "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(lowercase , lowercase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. _a = DataCollatorForLanguageModeling( tokenizer=lowercase , mlm_probability=args.mlm_probability , mlm=lowercase , return_tensors="tf" ) def mask_with_collator(lowercase : List[Any] ): # TF really needs an isin() function _a = ( ~tf.cast(batch["attention_mask"] , tf.bool ) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) _a , _a = data_collator.tf_mask_tokens( batch["input_ids"] , vocab_size=len(lowercase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase , ) return batch _a = args.per_replica_batch_size * strategy.num_replicas_in_sync _a = prepare_dataset( lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , shuffle_buffer_size=args.shuffle_buffer_size , ) _a = prepare_dataset( lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , ) _a = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase ) ) model.fit( lowercase , validation_data=lowercase , epochs=args.num_epochs , callbacks=lowercase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": lowerCAmelCase_ : Any = parse_args() main(args)
63
1
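The first cell above is a generated file of placeholder classes that all follow one guard pattern: every constructor and classmethod calls requires_backends, so that importing the library without torch only fails at the moment a torch-backed class is actually used. A minimal self-contained sketch of that pattern (the helper bodies and the SomeTorchOnlyModel class are illustrative, not the real library's implementation):

import importlib.util


def is_backend_available(backend: str) -> bool:
    """True if the named package can be imported."""
    return importlib.util.find_spec(backend) is not None


def requires_backends(obj, backends) -> None:
    """Raise a helpful ImportError when any required backend is missing."""
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if not is_backend_available(b)]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


class DummyObject(type):
    """Metaclass that guards classmethod access on placeholder classes."""

    def __getattribute__(cls, key):
        if key.startswith("_"):  # allow _backends and dunders through untouched
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
        return super().__getattribute__(key)


class SomeTorchOnlyModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)


if __name__ == "__main__":
    try:
        SomeTorchOnlyModel()  # raises ImportError unless torch is installed
    except ImportError as err:
        print(err)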
'''simple docstring''' from math import sqrt def _lowerCamelCase ( lowercase : int ) -> bool: assert isinstance(lowercase , lowercase ) and ( number >= 0 ), "'number' must been an int and positive" _a = True # 0 and 1 are none primes. if number <= 1: _a = False for divisor in range(2 , int(round(sqrt(lowercase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: _a = False break # precondition assert isinstance(lowercase , lowercase ), "'status' must been from type bool" return status def _lowerCamelCase ( lowercase : Tuple ) -> Dict: assert isinstance(lowercase , lowercase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N _a = list(range(2 , n + 1 ) ) _a = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowercase ) ): for j in range(i + 1 , len(lowercase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): _a = 0 # filters actual prime numbers. _a = [x for x in begin_list if x != 0] # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type list" return ans def _lowerCamelCase ( lowercase : int ) -> int: assert isinstance(lowercase , lowercase ) and (n > 2), "'N' must been an int and > 2" _a = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(lowercase ): ans.append(lowercase ) # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type list" return ans def _lowerCamelCase ( lowercase : Tuple ) -> List[Any]: assert isinstance(lowercase , lowercase ) and number >= 0, "'number' must been an int and >= 0" _a = [] # this list will be returns of the function. # potential prime number factors. 
_a = 2 _a = number if number == 0 or number == 1: ans.append(lowercase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowercase ): while quotient != 1: if is_prime(lowercase ) and (quotient % factor == 0): ans.append(lowercase ) quotient /= factor else: factor += 1 else: ans.append(lowercase ) # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type list" return ans def _lowerCamelCase ( lowercase : List[Any] ) -> List[Any]: assert isinstance(lowercase , lowercase ) and ( number >= 0 ), "'number' bust been an int and >= 0" _a = 0 # prime factorization of 'number' _a = prime_factorization(lowercase ) _a = max(lowercase ) # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type int" return ans def _lowerCamelCase ( lowercase : Dict ) -> List[Any]: assert isinstance(lowercase , lowercase ) and ( number >= 0 ), "'number' bust been an int and >= 0" _a = 0 # prime factorization of 'number' _a = prime_factorization(lowercase ) _a = min(lowercase ) # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type int" return ans def _lowerCamelCase ( lowercase : List[Any] ) -> Dict: assert isinstance(lowercase , lowercase ), "'number' must been an int" assert isinstance(number % 2 == 0 , lowercase ), "compare bust been from type bool" return number % 2 == 0 def _lowerCamelCase ( lowercase : Optional[int] ) -> Tuple: assert isinstance(lowercase , lowercase ), "'number' must been an int" assert isinstance(number % 2 != 0 , lowercase ), "compare bust been from type bool" return number % 2 != 0 def _lowerCamelCase ( lowercase : List[str] ) -> Optional[int]: assert ( isinstance(lowercase , lowercase ) and (number > 2) and is_even(lowercase ) ), "'number' must been an int, even and > 2" _a = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' _a = get_prime_numbers(lowercase ) _a = len(lowercase ) # run variable for while-loops. _a = 0 _a = None # exit variable. for break up the loops _a = True while i < len_pn and loop: _a = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: _a = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(lowercase , lowercase ) and (len(lowercase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def _lowerCamelCase ( lowercase : str , lowercase : int ) -> int: assert ( isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." _a = 0 while numbera != 0: _a = numbera % numbera _a = numbera _a = rest # precondition assert isinstance(lowercase , lowercase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def _lowerCamelCase ( lowercase : str , lowercase : str ) -> List[str]: assert ( isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." _a = 1 # actual answer that will be return. 
# for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' _a = prime_factorization(lowercase ) _a = prime_factorization(lowercase ) elif numbera == 1 or numbera == 1: _a = [] _a = [] _a = max(lowercase , lowercase ) _a = 0 _a = 0 _a = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: _a = prime_fac_a.count(lowercase ) _a = prime_fac_a.count(lowercase ) for _ in range(max(lowercase , lowercase ) ): ans *= n else: _a = prime_fac_a.count(lowercase ) for _ in range(lowercase ): ans *= n done.append(lowercase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: _a = prime_fac_a.count(lowercase ) for _ in range(lowercase ): ans *= n done.append(lowercase ) # precondition assert isinstance(lowercase , lowercase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def _lowerCamelCase ( lowercase : int ) -> Optional[int]: assert isinstance(lowercase , lowercase ) and (n >= 0), "'number' must been a positive int" _a = 0 _a = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowercase ): ans += 1 # precondition assert isinstance(lowercase , lowercase ) and is_prime( lowercase ), "'ans' must been a prime number and from type int" return ans def _lowerCamelCase ( lowercase : Tuple , lowercase : Tuple ) -> str: assert ( is_prime(lowercase ) and is_prime(lowercase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" _a = p_number_a + 1 # jump to the next number _a = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowercase ): number += 1 while number < p_number_a: ans.append(lowercase ) number += 1 # fetch the next prime number. while not is_prime(lowercase ): number += 1 # precondition assert ( isinstance(lowercase , lowercase ) and ans[0] != p_number_a and ans[len(lowercase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def _lowerCamelCase ( lowercase : List[str] ) -> Dict: assert isinstance(lowercase , lowercase ) and (n >= 1), "'n' must been int and >= 1" _a = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(lowercase ) # precondition assert ans[0] == 1 and ans[len(lowercase ) - 1] == n, "Error in function getDivisiors(...)" return ans def _lowerCamelCase ( lowercase : int ) -> Optional[Any]: assert isinstance(lowercase , lowercase ) and ( number > 1 ), "'number' must been an int and >= 1" _a = get_divisors(lowercase ) # precondition assert ( isinstance(lowercase , lowercase ) and (divisors[0] == 1) and (divisors[len(lowercase ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] ) -> int: assert ( isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. 
_a = gcd(abs(lowercase ) , abs(lowercase ) ) # precondition assert ( isinstance(lowercase , lowercase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _lowerCamelCase ( lowercase : int ) -> Any: assert isinstance(lowercase , lowercase ) and (n >= 0), "'n' must been a int and >= 0" _a = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def _lowerCamelCase ( lowercase : int ) -> int: assert isinstance(lowercase , lowercase ) and (n >= 0), "'n' must been an int and >= 0" _a = 0 _a = 1 _a = 1 # this will be return for _ in range(n - 1 ): _a = ans ans += fiba _a = tmp return ans
63
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='LayoutLMv3ImageProcessor' __a =('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__( self : Tuple , __a : int=None , __a : Union[str, Any]=None , **__a : Optional[Any] ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__( self : Any , __a : List[str] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Dict , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) # first, apply the image processor _a = self.image_processor(images=__a , return_tensors=__a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__a , __a ): _a = [text] # add batch dimension (as the image processor always adds a batch dimension) _a = features["words"] _a = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , ) # add pixel values _a = features.pop("pixel_values" ) if return_overflowing_tokens is True: _a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] ) _a = images return encoded_inputs def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image _a = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__a ) != len(__a ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f' {len(__a )} and {len(__a )}' ) return images_with_overflow def UpperCamelCase__ ( self : int , *__a : str , **__a : Tuple ): return self.tokenizer.batch_decode(*__a , **__a ) def UpperCamelCase__ ( self : str , *__a : List[Any] , **__a : List[str] ): return self.tokenizer.decode(*__a , **__a ) @property def UpperCamelCase__ ( self : Tuple ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def UpperCamelCase__ ( self : int ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def UpperCamelCase__ ( self : List[str] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
63
1
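The prime-number cell above is hard to exercise as written, because the obfuscation binds every helper to the same _lowerCamelCase name so later definitions shadow earlier ones. A cleaned-up, runnable sketch of two of its helpers — trial-division primality and the quadratic sieve it implements — assuming the intent matches the cell's own comments:

from math import isqrt
from typing import List


def is_prime(number: int) -> bool:
    """Trial division up to sqrt(number); 0 and 1 are not prime."""
    if number <= 1:
        return False
    for divisor in range(2, isqrt(number) + 1):
        if number % divisor == 0:
            return False
    return True


def sieve_of_eratosthenes(n: int) -> List[int]:
    """All primes from 2 up to and including n (same O(n^2) scheme as above)."""
    candidates = list(range(2, n + 1))
    for i in range(len(candidates)):
        for j in range(i + 1, len(candidates)):
            # zero out every later entry divisible by a surviving candidate
            if candidates[i] != 0 and candidates[j] % candidates[i] == 0:
                candidates[j] = 0
    return [x for x in candidates if x != 0]


assert is_prime(97) and not is_prime(1)
assert sieve_of_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]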
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # Loss of the small checkpoint must match the reference score.
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
63
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Wraps an existing text-model config and adds the multimodal fields the model needs."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Adopt all attributes of the wrapped text config, then add the modal ones.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
63
1
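The MT5 test above turns a mean per-token cross-entropy into a sequence score via -(labels.shape[-1] * loss.item()); for a batch of one this equals the total log-likelihood of the label sequence. A small NumPy sketch of the same arithmetic with made-up logits (sequence_score is an illustrative name, not part of the test):

import numpy as np


def sequence_score(logits: np.ndarray, labels: np.ndarray) -> float:
    """-(seq_len * mean token cross-entropy) == log-likelihood for batch size 1."""
    # numerically stable log-softmax over the vocabulary dimension
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    # pick out the log-probability of each gold label token
    token_losses = -np.take_along_axis(log_probs, labels[..., None], axis=-1).squeeze(-1)
    return float(-(labels.shape[-1] * token_losses.mean()))


rng = np.random.default_rng(0)
logits = rng.normal(size=(1, 4, 10))        # (batch, seq_len, vocab)
labels = rng.integers(0, 10, size=(1, 4))   # gold token ids
print(sequence_score(logits, labels))       # negative total cross-entropy of the sequence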
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ : str = logging.get_logger(__name__) def _lowerCamelCase ( lowercase : int , lowercase : Union[str, Any]=False ) -> Optional[Any]: _a = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', 
F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _a = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : List[str]=False ) -> List[Any]: for i in range(config.num_hidden_layers ): if base_model: _a = "" else: _a = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _a = state_dict.pop(F'blocks.{i}.attn.qkv.weight' ) _a = state_dict.pop(F'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _a = in_proj_weight[ : config.hidden_size, : ] _a = in_proj_bias[: config.hidden_size] _a = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _a = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _a = in_proj_weight[ -config.hidden_size :, : ] _a = in_proj_bias[-config.hidden_size :] def _lowerCamelCase ( lowercase : str ) -> List[str]: _a = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(lowercase , lowercase ) def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : Union[str, Any] ) -> int: _a = dct.pop(lowercase ) _a = val def _lowerCamelCase ( ) -> Dict: _a = "http://images.cocodataset.org/val2017/000000039769.jpg" _a = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : List[str] , lowercase : List[Any]=False ) -> List[str]: _a = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=lowercase , ) _a = ViTHybridConfig(backbone_config=lowercase , image_size=384 , num_labels=1000 ) _a = False # load original model from timm _a = timm.create_model(lowercase , pretrained=lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _a = timm_model.state_dict() if base_model: remove_classification_head_(lowercase ) _a = create_rename_keys(lowercase , lowercase ) for src, dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) read_in_q_k_v(lowercase , lowercase , lowercase ) _a = "huggingface/label-files" _a = "imagenet-1k-id2label.json" _a = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) _a = {int(lowercase ): v for k, v in idalabel.items()} _a = idalabel _a = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _a = ViTHybridModel(lowercase ).eval() else: _a = ViTHybridForImageClassification(lowercase ).eval() model.load_state_dict(lowercase ) # create image processor _a = create_transform(**resolve_data_config({} , model=lowercase ) ) _a = transform.transforms _a = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _a = ViTHybridImageProcessor( do_resize=lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _a = prepare_img() _a = transform(lowercase ).unsqueeze(0 ) _a = processor(lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(lowercase , lowercase ) # verify logits with torch.no_grad(): _a = model(lowercase ) _a = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _a = timm_model.forward_features(lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase , outputs.pooler_output , atol=1E-3 ) else: _a = timm_model(lowercase ) assert timm_logits.shape == 
outputs.logits.shape assert torch.allclose(lowercase , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase ) print(F'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowercase ) if push_to_hub: print(F'Pushing model and processor to the hub {vit_name}' ) model.push_to_hub(F'ybelkada/{vit_name}' ) processor.push_to_hub(F'ybelkada/{vit_name}' ) if __name__ == "__main__": lowerCAmelCase_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_r50_s16_384', type=str, help='Name of the hybrid ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) lowerCAmelCase_ : Union[str, Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
63
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares
    of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
63
1
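The sum-square-difference solution above runs an O(n) loop; it can be cross-checked against the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6:

def solution_closed_form(n: int = 100) -> int:
    """Same quantity as the loop version, computed in O(1)."""
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


assert solution_closed_form(10) == 2640        # 55**2 - 385
assert solution_closed_form(100) == 25164150   # known answer for n = 100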
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : str = logging.get_logger(__name__) lowerCAmelCase_ : Tuple = { 'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json', 'Salesforce/blip-vqa-capfit-large': ( 'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-base': ( 'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-large': ( 'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json' ), 'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json', 'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json', 'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json', 'Salesforce/blip-itm-large-flikr': ( 'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json' ), } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='blip_text_model' def __init__( self : int , __a : int=3_05_24 , __a : List[Any]=7_68 , __a : Dict=7_68 , __a : List[str]=30_72 , __a : Any=7_68 , __a : Optional[Any]=12 , __a : Optional[int]=8 , __a : Any=5_12 , __a : Tuple="gelu" , __a : Any=1e-1_2 , __a : Optional[Any]=0.0 , __a : int=0.0 , __a : Dict=0.02 , __a : Optional[int]=3_05_22 , __a : Union[str, Any]=2 , __a : Tuple=0 , __a : Tuple=1_02 , __a : Union[str, Any]=True , __a : Any=True , **__a : List[str] , ): super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , ) _a = vocab_size _a = hidden_size _a = encoder_hidden_size _a = intermediate_size _a = projection_dim _a = hidden_dropout_prob _a = num_hidden_layers _a = num_attention_heads _a = max_position_embeddings _a = layer_norm_eps _a = hidden_act _a = initializer_range _a = attention_probs_dropout_prob _a = is_decoder _a = use_cache @classmethod def UpperCamelCase__ ( cls : Tuple , __a : Union[str, os.PathLike] , **__a : Any ): cls._set_token_in_kwargs(__a ) _a , _a = cls.get_config_dict(__a , **__a ) # get the text config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _a = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(__a , **__a ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='blip_vision_model' def __init__( self : Optional[int] , __a : Any=7_68 , __a : Optional[Any]=30_72 , __a : Optional[Any]=5_12 , __a : Tuple=12 , __a : List[Any]=12 , __a : str=3_84 , __a : Dict=16 , __a : str="gelu" , __a : Optional[Any]=1e-5 , __a : Dict=0.0 , __a : Dict=1e-1_0 , **__a : Dict , ): super().__init__(**__a ) _a = hidden_size _a = intermediate_size _a = projection_dim _a = num_hidden_layers _a = num_attention_heads _a = patch_size _a = image_size _a = initializer_range _a = attention_dropout _a = layer_norm_eps _a = hidden_act @classmethod def UpperCamelCase__ ( cls : str , __a : Union[str, os.PathLike] , **__a : str ): cls._set_token_in_kwargs(__a ) _a , _a = cls.get_config_dict(__a , **__a ) # get the vision config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _a = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(__a , **__a ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='blip' __a =True def __init__( self : Union[str, Any] , __a : Optional[int]=None , __a : str=None , __a : int=5_12 , __a : int=2.6592 , __a : Any=2_56 , **__a : str , ): super().__init__(**__a ) if text_config is None: _a = {} logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." ) if vision_config is None: _a = {} logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." ) _a = BlipTextConfig(**__a ) _a = BlipVisionConfig(**__a ) _a = self.vision_config.hidden_size _a = projection_dim _a = logit_scale_init_value _a = 1.0 _a = 0.02 _a = image_text_hidden_size @classmethod def UpperCamelCase__ ( cls : Tuple , __a : BlipTextConfig , __a : BlipVisionConfig , **__a : Optional[int] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a ) def UpperCamelCase__ ( self : Union[str, Any] ): _a = copy.deepcopy(self.__dict__ ) _a = self.text_config.to_dict() _a = self.vision_config.to_dict() _a = self.__class__.model_type return output
63
def is_palindrome(num: int) -> bool:
    """Return True if num reads the same forwards and backwards in base 10."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
63
1
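The BLIP config above composes text and vision sub-configs and round-trips them through plain dicts (from_text_vision_configs, to_dict). A minimal dataclass sketch of that composition pattern; all names here are illustrative, not the transformers API:

from dataclasses import asdict, dataclass, field


@dataclass
class TextConfig:
    vocab_size: int = 30524
    hidden_size: int = 768


@dataclass
class VisionConfig:
    image_size: int = 384
    patch_size: int = 16


@dataclass
class ComposedConfig:
    text_config: TextConfig = field(default_factory=TextConfig)
    vision_config: VisionConfig = field(default_factory=VisionConfig)
    projection_dim: int = 512

    def to_dict(self) -> dict:
        # asdict recursively serializes the nested sub-configs
        return asdict(self)

    @classmethod
    def from_dict(cls, d: dict) -> "ComposedConfig":
        return cls(
            text_config=TextConfig(**d.get("text_config", {})),
            vision_config=VisionConfig(**d.get("vision_config", {})),
            projection_dim=d.get("projection_dim", 512),
        )


cfg = ComposedConfig()
assert ComposedConfig.from_dict(cfg.to_dict()) == cfg  # dict round-trip is lossless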
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ : Dict = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =AlbertTokenizer __a =AlbertTokenizerFast __a =True __a =True __a =True def UpperCamelCase__ ( self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing _a = AlbertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self : Dict , __a : int ): _a = "this is a test" _a = "this is a test" return input_text, output_text def UpperCamelCase__ ( self : Any ): _a = "<pad>" _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def UpperCamelCase__ ( self : List[Any] ): _a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "▁eloquent" ) self.assertEqual(len(__a ) , 3_00_00 ) def UpperCamelCase__ ( self : Union[str, Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def UpperCamelCase__ ( self : Dict ): if not self.test_rust_tokenizer: return _a = self.get_tokenizer() _a = self.get_rust_tokenizer() _a = "I was born in 92000, and this is falsé." _a = tokenizer.tokenize(__a ) _a = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) _a = tokenizer.encode(__a , add_special_tokens=__a ) _a = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) _a = self.get_rust_tokenizer() _a = tokenizer.encode(__a ) _a = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) def UpperCamelCase__ ( self : Dict ): _a = AlbertTokenizer(__a , keep_accents=__a ) _a = tokenizer.tokenize("This is a test" ) self.assertListEqual(__a , ["▁this", "▁is", "▁a", "▁test"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 12_89] ) _a = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( __a , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] ) _a = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] ) _a = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , ) def UpperCamelCase__ ( self : int ): _a = AlbertTokenizer(__a ) _a = tokenizer.encode("sequence builders" ) _a = tokenizer.encode("multi-sequence build" ) _a = tokenizer.build_inputs_with_special_tokens(__a ) _a = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def UpperCamelCase__ ( self : str ): # fmt: off _a = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
63
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase_ : int = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Optional[int] = ['GPTNeoXTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : List[str] = [ 'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoXForCausalLM', 'GPTNeoXForQuestionAnswering', 'GPTNeoXForSequenceClassification', 'GPTNeoXForTokenClassification', 'GPTNeoXLayer', 'GPTNeoXModel', 'GPTNeoXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
63
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : int = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='gpt_bigcode' __a =['past_key_values'] __a ={ 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Optional[Any] , __a : Tuple=5_02_57 , __a : str=10_24 , __a : Dict=7_68 , __a : Tuple=12 , __a : str=12 , __a : Optional[int]=None , __a : Dict="gelu_pytorch_tanh" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=0.1 , __a : Tuple=1e-5 , __a : str=0.02 , __a : Dict=True , __a : Union[str, Any]=True , __a : Optional[int]=5_02_56 , __a : Optional[int]=5_02_56 , __a : Union[str, Any]=True , __a : Dict=True , __a : Union[str, Any]=True , **__a : List[Any] , ): _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = scale_attn_weights _a = use_cache _a = attention_softmax_in_fpaa _a = scale_attention_softmax_in_fpaa _a = multi_query _a = bos_token_id _a = eos_token_id super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
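# Illustrative instantiation of the config above, assuming the upstream
# `transformers.GPTBigCodeConfig` name that this record obfuscates:
#
# from transformers import GPTBigCodeConfig
# config = GPTBigCodeConfig(n_layer=12, n_head=12, n_embd=768, multi_query=True)
# config.max_position_embeddings  # 1024, mapped from n_positions via attribute_map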
63
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase_ : Any = get_tests_dir('fixtures') lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json') class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] ): _a = 0 def UpperCamelCase__ ( self : str ): _a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: _a = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _a = AutoFeatureExtractor.from_pretrained(__a ).to_dict() config_dict.pop("feature_extractor_type" ) _a = WavaVecaFeatureExtractor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) # make sure private variable is not incorrectly saved _a = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Union[str, Any] ): with self.assertRaisesRegex( __a , "bert-base is not a local folder and is not a valid model identifier" ): _a = AutoFeatureExtractor.from_pretrained("bert-base" ) def UpperCamelCase__ ( self : Optional[Any] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" ) def UpperCamelCase__ ( self : List[Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): _a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def UpperCamelCase__ ( self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def UpperCamelCase__ ( self : Any ): try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoFeatureExtractor.register(__a , __a ) # Now that the config is registered, it can be used as any other config with the auto-API _a = CustomFeatureExtractor.from_pretrained(__a ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__ ( self : Tuple ): class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =True try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # If remote code is not set, the default is to use local _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
63
1
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase_ : Optional[int] = { 'configuration_efficientnet': [ 'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EfficientNetConfig', 'EfficientNetOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Any = ['EfficientNetImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Optional[Any] = [ 'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'EfficientNetForImageClassification', 'EfficientNetModel', 'EfficientNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
63
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
63
'''simple docstring'''


def perfect_cube(n: int) -> bool:
    # Assumes n >= 0. Round the floating-point cube root before cubing;
    # `n ** (1 / 3)` is inexact, so a direct comparison would fail.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
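# Note on rounding (standard float behavior, shown for illustration): the raw
# float cube root 27 ** (1 / 3) evaluates to roughly 3.0000000000000004, so
# cubing it and comparing to 27 with == fails; rounding to the nearest integer
# before cubing makes the equality test exact.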
63
1
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase_ : Optional[Any] = 'platform' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _lowerCamelCase ( lowercase : int , lowercase : Any , lowercase : Optional[Any]=None , lowercase : Tuple=None , lowercase : List[str]=None , lowercase : int=None , lowercase : List[str]=None , lowercase : str=None , ) -> Optional[Any]: if attention_mask is None: _a = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _a = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _a = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _a = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _a = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[Any] , __a : Tuple , __a : Optional[int]=13 , __a : Any=7 , __a : Optional[int]=True , __a : Any=False , __a : Optional[int]=99 , __a : Dict=16 , __a : Optional[Any]=2 , __a : Dict=4 , __a : List[Any]=4 , __a : Optional[Any]="gelu" , __a : Any=0.1 , __a : Optional[Any]=0.1 , __a : Dict=32 , __a : Optional[Any]=2 , __a : Optional[Any]=1 , __a : Tuple=0 , __a : Optional[int]=0.02 , ): _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = eos_token_id _a = pad_token_id _a = bos_token_id _a = initializer_range def UpperCamelCase__ ( self : Dict ): _a = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _a = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _a = shift_tokens_right(__a , 1 , 2 ) _a = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , 
use_cache=__a , ) _a = prepare_blenderbot_inputs_dict(__a , __a , __a ) return config, inputs_dict def UpperCamelCase__ ( self : int ): _a , _a = self.prepare_config_and_inputs() return config, inputs_dict def UpperCamelCase__ ( self : List[Any] , __a : int , __a : Any , __a : int ): _a = 20 _a = model_class_name(__a ) _a = model.encode(inputs_dict["input_ids"] ) _a , _a = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _a = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) _a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model.decode( decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , ) _a = model.decode(__a , __a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def UpperCamelCase__ ( self : Any , __a : int , __a : Optional[int] , __a : Dict ): _a = 20 _a = model_class_name(__a ) _a = model.encode(inputs_dict["input_ids"] ) _a , _a = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _a = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _a = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model.decode( decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , ) _a = model.decode(__a , __a , decoder_attention_mask=__a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) @require_flax class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" __a =99 def UpperCamelCase__ ( self : Union[str, Any] ): _a = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) _a = input_ids.shape[0] _a = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def UpperCamelCase__ ( self : Union[str, Any] ): _a , _a , _a = self._get_config_and_data() _a = 
FlaxBlenderbotForConditionalGeneration(__a ) _a = lm_model(input_ids=__a ) _a = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , __a ) def UpperCamelCase__ ( self : List[str] ): _a = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) _a = FlaxBlenderbotForConditionalGeneration(__a ) _a = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) _a = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) _a = lm_model(input_ids=__a , decoder_input_ids=__a ) _a = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , __a ) def UpperCamelCase__ ( self : Tuple ): _a = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) _a = shift_tokens_right(__a , 1 , 2 ) _a = np.equal(__a , 1 ).astype(np.floataa ).sum() _a = np.equal(__a , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(__a , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase , lowerCamelCase_ ): """simple docstring""" __a =True __a =( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __a =(FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def UpperCamelCase__ ( self : List[Any] ): _a = FlaxBlenderbotModelTester(self ) def UpperCamelCase__ ( self : List[str] ): _a , _a = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__a , __a , __a ) def UpperCamelCase__ ( self : Optional[int] ): _a , _a = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a ) def UpperCamelCase__ ( self : Any ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = self._prepare_for_class(__a , __a ) _a = model_class(__a ) @jax.jit def encode_jitted(__a : Tuple , __a : Union[str, Any]=None , **__a : Optional[Any] ): return model.encode(input_ids=__a , attention_mask=__a ) with self.subTest("JIT Enabled" ): _a = encode_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _a = encode_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCamelCase__ ( self : Optional[int] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = model_class(__a ) _a = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) _a = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(__a : Dict , __a : str , __a : Tuple ): return model.decode( decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , ) with self.subTest("JIT Enabled" ): _a = decode_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _a = decode_jitted(**__a 
).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCamelCase__ ( self : Dict ): for model_class_name in self.all_model_classes: _a = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _a = np.ones((1, 1) ) * model.config.eos_token_id _a = model(__a ) self.assertIsNotNone(__a ) @unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." ) @slow def UpperCamelCase__ ( self : List[str] ): _a = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25} _a = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True} _a = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=__a ) _a = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" ) _a = ["Sam"] _a = tokenizer(__a , return_tensors="jax" ) _a = model.generate(**__a , **__a ) _a = "Sam is a great name. It means \"sun\" in Gaelic." _a = tokenizer.batch_decode(__a , **__a ) assert generated_txt[0].strip() == tgt_text
63
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : Optional[int] = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='deta' __a ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) _a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(__a , __a ): _a = backbone_config.pop("model_type" ) _a = CONFIG_MAPPING[backbone_model_type] _a = config_class.from_dict(__a ) _a = backbone_config _a = num_queries _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = init_xavier_std _a = encoder_layerdrop _a = auxiliary_loss _a = position_embedding_type # deformable attributes _a = num_feature_levels _a = encoder_n_points _a = decoder_n_points _a = two_stage _a = two_stage_num_proposals _a = with_box_refine _a = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher _a = class_cost _a = bbox_cost _a = giou_cost # Loss coefficients _a = mask_loss_coefficient _a = dice_loss_coefficient _a = bbox_loss_coefficient _a = giou_loss_coefficient _a = eos_coefficient _a = focal_alpha super().__init__(is_encoder_decoder=__a , **__a ) @property def UpperCamelCase__ ( self : Optional[Any] ): return self.encoder_attention_heads @property def UpperCamelCase__ ( self : Dict ): return self.d_model def UpperCamelCase__ ( self : List[str] ): _a = copy.deepcopy(self.__dict__ ) _a = self.backbone_config.to_dict() _a = self.__class__.model_type return output
63
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __a =( { 'feature-extraction': TFMobileBertModel, 'fill-mask': TFMobileBertForMaskedLM, 'question-answering': TFMobileBertForQuestionAnswering, 'text-classification': TFMobileBertForSequenceClassification, 'token-classification': TFMobileBertForTokenClassification, 'zero-shot': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __a =False __a =False def UpperCamelCase__ ( self : str , __a : Tuple , __a : Optional[Any] , __a : List[str]=False ): _a = super()._prepare_for_class(__a , __a , return_labels=__a ) if return_labels: if model_class in get_values(__a ): _a = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : List[str] , __a : int , __a : Union[str, Any]=13 , __a : int=7 , __a : List[str]=True , __a : str=True , __a : Union[str, Any]=True , __a : Union[str, Any]=True , __a : Tuple=99 , __a : Any=32 , __a : Tuple=32 , __a : Any=2 , __a : int=4 , __a : Dict=37 , __a : Union[str, Any]="gelu" , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.1 , __a : Dict=5_12 , __a : int=16 , __a : str=2 , __a : Tuple=0.02 , __a : Any=3 , __a : Tuple=4 , __a : Any=None , ): _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_input_mask _a = use_token_type_ids _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = type_sequence_label_size _a = initializer_range _a = num_labels _a = num_choices _a = scope _a = embedding_size def UpperCamelCase__ ( self : Any ): _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = None if self.use_input_mask: _a = random_attention_mask([self.batch_size, self.seq_length] ) _a = None if self.use_token_type_ids: _a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a = None _a = None _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a = ids_tensor([self.batch_size] , self.num_choices ) _a = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , 
num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self : Optional[int] , __a : Optional[Any] , __a : int , __a : Tuple , __a : str , __a : List[Any] , __a : List[Any] , __a : str ): _a = TFMobileBertModel(config=__a ) _a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _a = model(__a ) _a = [input_ids, input_mask] _a = model(__a ) _a = model(__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ ( self : List[str] , __a : List[str] , __a : Dict , __a : Tuple , __a : Union[str, Any] , __a : Dict , __a : List[Any] , __a : List[str] ): _a = TFMobileBertForMaskedLM(config=__a ) _a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _a = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self : Union[str, Any] , __a : Dict , __a : Dict , __a : Any , __a : Any , __a : str , __a : Tuple , __a : str ): _a = TFMobileBertForNextSentencePrediction(config=__a ) _a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _a = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCamelCase__ ( self : List[Any] , __a : Tuple , __a : Optional[Any] , __a : List[Any] , __a : List[Any] , __a : List[str] , __a : Optional[Any] , __a : List[Any] ): _a = TFMobileBertForPreTraining(config=__a ) _a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _a = model(__a ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCamelCase__ ( self : List[str] , __a : List[Any] , __a : str , __a : str , __a : Union[str, Any] , __a : List[Any] , __a : List[Any] , __a : List[Any] ): _a = self.num_labels _a = TFMobileBertForSequenceClassification(config=__a ) _a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _a = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self : Any , __a : str , __a : Any , __a : Optional[int] , __a : int , __a : Optional[int] , __a : int , __a : List[Any] ): _a = self.num_choices _a = TFMobileBertForMultipleChoice(config=__a ) _a = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) _a = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) _a = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) _a = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _a = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ ( 
self : List[str] , __a : List[str] , __a : List[Any] , __a : Dict , __a : Dict , __a : int , __a : str , __a : List[str] ): _a = self.num_labels _a = TFMobileBertForTokenClassification(config=__a ) _a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _a = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self : Optional[Any] , __a : Tuple , __a : List[Any] , __a : str , __a : int , __a : Any , __a : int , __a : List[str] ): _a = TFMobileBertForQuestionAnswering(config=__a ) _a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _a = model(__a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self : Any ): _a = self.prepare_config_and_inputs() ( ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ) = config_and_inputs _a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def UpperCamelCase__ ( self : str ): _a = TFMobileBertModelTest.TFMobileBertModelTester(self ) _a = ConfigTester(self , config_class=__a , hidden_size=37 ) def UpperCamelCase__ ( self : Tuple ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self : List[str] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__a ) def UpperCamelCase__ ( self : Union[str, Any] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a ) def UpperCamelCase__ ( self : Any ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a ) def UpperCamelCase__ ( self : List[Any] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a ) def UpperCamelCase__ ( self : Union[str, Any] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__a ) def UpperCamelCase__ ( self : List[Any] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__a ) def UpperCamelCase__ ( self : Optional[Any] ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a ) def UpperCamelCase__ ( self : Any ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__a ) @slow def UpperCamelCase__ ( self : List[str] ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _a = TFMobileBertModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_tf class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" @slow def UpperCamelCase__ ( self : Optional[Any] ): _a = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" ) _a = tf.constant([[0, 1, 2, 3, 4, 5]] ) _a = model(__a )[0] _a = [1, 6, 3_05_22] self.assertEqual(output.shape , __a ) _a = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
63
'''simple docstring'''

import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    # Compute and pickle per-example lengths so the datasets can batch dynamically.
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
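# Hypothetical CLI invocation for the script above via `fire` (the model name
# and data directory are illustrative assumptions, not part of this record):
#
#   python save_len_file.py facebook/bart-base ./cnn_dm --consider_target=True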
63
1
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class __SCREAMING_SNAKE_CASE (TensorFormatter[Mapping, 'torch.Tensor', Mapping] ): """simple docstring""" def __init__( self : Any , __a : Tuple=None , **__a : Tuple ): super().__init__(features=__a ) _a = torch_tensor_kwargs import torch # noqa import torch at initialization def UpperCamelCase__ ( self : Optional[Any] , __a : Any ): import torch if isinstance(__a , __a ) and column: if all( isinstance(__a , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(__a ) return column def UpperCamelCase__ ( self : Optional[int] , __a : Tuple ): import torch if isinstance(__a , (str, bytes, type(__a )) ): return value elif isinstance(__a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() _a = {} if isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): _a = {"dtype": torch.intaa} elif isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): _a = {"dtype": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__a , PIL.Image.Image ): _a = np.asarray(__a ) return torch.tensor(__a , **{**default_dtype, **self.torch_tensor_kwargs} ) def UpperCamelCase__ ( self : Optional[int] , __a : List[Any] ): import torch # support for torch, tf, jax etc. if hasattr(__a , "__array__" ) and not isinstance(__a , torch.Tensor ): _a = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__a , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__a ) for substruct in data_struct] ) elif isinstance(__a , (list, tuple) ): return self._consolidate([self.recursive_tensorize(__a ) for substruct in data_struct] ) return self._tensorize(__a ) def UpperCamelCase__ ( self : int , __a : dict ): return map_nested(self._recursive_tensorize , __a , map_list=__a ) def UpperCamelCase__ ( self : Any , __a : pa.Table ): _a = self.numpy_arrow_extractor().extract_row(__a ) _a = self.python_features_decoder.decode_row(__a ) return self.recursive_tensorize(__a ) def UpperCamelCase__ ( self : List[Any] , __a : pa.Table ): _a = self.numpy_arrow_extractor().extract_column(__a ) _a = self.python_features_decoder.decode_column(__a , pa_table.column_names[0] ) _a = self.recursive_tensorize(__a ) _a = self._consolidate(__a ) return column def UpperCamelCase__ ( self : Optional[Any] , __a : pa.Table ): _a = self.numpy_arrow_extractor().extract_batch(__a ) _a = self.python_features_decoder.decode_batch(__a ) _a = self.recursive_tensorize(__a ) for column_name in batch: _a = self._consolidate(batch[column_name] ) return batch
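# A minimal usage sketch for the formatter above, via the public `datasets`
# API that dispatches to it (the dataset contents are assumptions):
#
# from datasets import Dataset
# ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
# ds[0]["x"]  # torch.Tensor([1., 2.]) with dtype torch.float32, per the rules above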
63
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): _a = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): # pass variant but use the non-variant filenames _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Dict ): _a = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : List[str] ): # pass variant but use the non-variant filenames _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] _a = "fp16" 
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[int] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
63
1
'''simple docstring'''


def solution(pence: int = 200) -> int:
    # Classic coin-change DP: number_of_ways[i] counts the combinations that
    # sum to i pence using the coins considered so far.
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
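# Worked mini-example of the DP above (illustrative values): with coins
# [1, 2] and pence = 4, number_of_ways evolves to [1, 1, 2, 2, 3], giving
# 3 ways (1+1+1+1, 1+1+2, 2+2).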
63
'''simple docstring'''


def base16_encode(data: bytes) -> str:
    # Turn each byte into two uppercase hex digits and join them.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
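# Round-trip sketch (values are illustrative):
# base16_encode(b"HELLO")      -> "48454C4C4F"
# base16_decode("48454C4C4F")  -> b"HELLO"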
63
1
'''simple docstring''' import inspect import unittest class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Optional[Any] ): try: import diffusers # noqa: F401 except ImportError: assert False def UpperCamelCase__ ( self : Tuple ): import diffusers from diffusers.dependency_versions_table import deps _a = inspect.getmembers(__a , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": _a = "k-diffusion" elif backend == "invisible_watermark": _a = "invisible-watermark" assert backend in deps, f'{backend} is not in the deps table!'
63
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any: model.train() _a = model(lowercase ) _a = F.mse_loss(lowercase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase ) def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]: set_seed(42 ) _a = RegressionModel() _a = deepcopy(lowercase ) _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) model.to(accelerator.device ) if sched: _a = AdamW(params=model.parameters() , lr=1E-3 ) _a = AdamW(params=ddp_model.parameters() , lr=1E-3 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) # Make a copy of `model` if sched: _a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase ) else: _a , _a = accelerator.prepare(lowercase , lowercase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]: # Test when on a single CPU or GPU that the context manager does nothing _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase , lowercase , lowercase , lowercase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad 
({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : Tuple ) -> Tuple: # Test on distributed setup that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] GradientState._reset_state() def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , 
gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' _a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase )) if accelerator.num_processes > 1: check_model_parameters(lowercase , lowercase , lowercase , lowercase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def _lowerCamelCase ( ) -> Any: _a = Accelerator() _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) _a = RegressionDataset(length=96 ) _a = DataLoader(lowercase , batch_size=16 ) _a , _a = accelerator.prepare(lowercase , lowercase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if iteration < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if batch_num < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _lowerCamelCase ( ) -> Optional[Any]: _a = Accelerator() _a = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(lowercase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(lowercase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(lowercase , lowercase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( 
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase ) def _lowerCamelCase ( lowercase : Any ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
63
1
'''simple docstring''' import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm lowerCAmelCase_ : str = re.compile('[^A-Za-z_0-9]') # parameters used in DuplicationIndex lowerCAmelCase_ : List[str] = 10 lowerCAmelCase_ : Optional[Any] = 2_56 def _lowerCamelCase ( lowercase : List[str] ) -> Optional[MinHash]: if len(lowercase ) < MIN_NUM_TOKENS: return None _a = MinHash(num_perm=lowercase ) for token in set(lowercase ): min_hash.update(token.encode() ) return min_hash def _lowerCamelCase ( lowercase : str ) -> Set[str]: return {t for t in NON_ALPHA.split(lowercase ) if len(t.strip() ) > 0} class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Dict , *, __a : float = 0.85 , ): _a = duplication_jaccard_threshold _a = NUM_PERM _a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _a = defaultdict(__a ) def UpperCamelCase__ ( self : Dict , __a : Tuple , __a : MinHash ): _a = self._index.query(__a ) if code_key in self._index.keys: print(f'Duplicate key {code_key}' ) return self._index.insert(__a , __a ) if len(__a ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__a ) break else: self._duplicate_clusters[close_duplicates[0]].add(__a ) def UpperCamelCase__ ( self : str ): _a = [] for base, duplicates in self._duplicate_clusters.items(): _a = [base] + list(__a ) # reformat the cluster to be a list of dict _a = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster] duplicate_clusters.append(__a ) return duplicate_clusters def UpperCamelCase__ ( self : Optional[int] , __a : Any ): _a = self.get_duplicate_clusters() with open(__a , "w" ) as f: json.dump(__a , __a ) def _lowerCamelCase ( lowercase : Optional[int] ) -> Optional[Any]: _a , _a = element _a = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def _lowerCamelCase ( lowercase : Type[Dataset] ) -> Optional[int]: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(lowercase , max_queue_size=1_0000 ) , chunksize=100 , ): if data is not None: yield data def _lowerCamelCase ( lowercase : Type[Dataset] , lowercase : float ) -> Union[str, Any]: _a = DuplicationIndex(duplication_jaccard_threshold=lowercase ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowercase ) ) , max_queue_size=100 ) ): di.add(lowercase , lowercase ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def _lowerCamelCase ( lowercase : str , lowercase : str ) -> float: _a = get_tokens(lowercase ) _a = get_tokens(lowercase ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) lowerCAmelCase_ : Dict = None def _lowerCamelCase ( lowercase : Optional[int] , lowercase : str ) -> Optional[Any]: _a = [] for elementa in cluster: _a = _shared_dataset[elementa["base_index"]]["content"] for elementa in extremes: _a = _shared_dataset[elementa["base_index"]]["content"] if jaccard_similarity(lowercase , lowercase ) >= jaccard_threshold: elementa["copies"] += 1 break else: _a = 1 extremes.append(lowercase ) return extremes def _lowerCamelCase ( lowercase : List[Any] , lowercase : Dict , lowercase : Dict ) -> Optional[Any]: global _shared_dataset _a = dataset _a = [] _a = partial(_find_cluster_extremes_shared , jaccard_threshold=lowercase ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( lowercase , lowercase , ) , total=len(lowercase ) , ): extremes_list.append(lowercase ) return extremes_list def _lowerCamelCase ( lowercase : Type[Dataset] , lowercase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: _a = make_duplicate_clusters(lowercase , lowercase ) _a = {x["base_index"] for cluster in duplicate_clusters for x in cluster} _a = {} _a = find_extremes(lowercase , lowercase , lowercase ) for extremes in extremes_clusters: for element in extremes: _a = element _a = duplicate_indices - set(extreme_dict.keys() ) _a = dataset.filter(lambda lowercase , lowercase : idx not in remove_indices , with_indices=lowercase ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _a = element["base_index"] in extreme_dict if element["is_extreme"]: _a = extreme_dict[element["base_index"]]["copies"] print(F'Original dataset size: {len(lowercase )}' ) print(F'Number of duplicate clusters: {len(lowercase )}' ) print(F'Files in duplicate cluster: {len(lowercase )}' ) print(F'Unique files in duplicate cluster: {len(lowercase )}' ) print(F'Filtered dataset size: {len(lowercase )}' ) return ds_filter, duplicate_clusters
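A minimal, self-contained sketch of the MinHash primitive the deduplication script above is built on, assuming only the `datasketch` package; the token sets are hypothetical:

from datasketch import MinHash


def minhash_signature(tokens: set[str], num_perm: int = 256) -> MinHash:
    # Same signature scheme as above: one permutation-hashed token set per document.
    m = MinHash(num_perm=num_perm)
    for token in tokens:
        m.update(token.encode())
    return m


a = minhash_signature({"def", "foo", "return", "x"})
b = minhash_signature({"def", "bar", "return", "x"})
# Estimates the true Jaccard similarity |A & B| / |A | B| = 3 / 5 = 0.6.
print(a.jaccard(b))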
63
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase_ : List[str] = { 'microsoft/trocr-base-handwritten': ( 'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='trocr' __a =['past_key_values'] __a ={ 'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'decoder_layers', } def __init__( self : Optional[int] , __a : Any=5_02_65 , __a : Optional[int]=10_24 , __a : List[Any]=12 , __a : str=16 , __a : int=40_96 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=5_12 , __a : Dict=0.1 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Any=2 , __a : Union[str, Any]=0.02 , __a : Any=0.0 , __a : List[str]=True , __a : Optional[Any]=False , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : Any=1 , __a : List[Any]=0 , __a : Any=2 , **__a : Optional[Any] , ): _a = vocab_size _a = d_model _a = decoder_layers _a = decoder_attention_heads _a = decoder_ffn_dim _a = activation_function _a = max_position_embeddings _a = dropout _a = attention_dropout _a = activation_dropout _a = init_std _a = decoder_layerdrop _a = use_cache _a = scale_embedding _a = use_learned_position_embeddings _a = layernorm_embedding super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
63
1
'''simple docstring''' from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def _lowerCamelCase ( lowercase : Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]: _a = [] _a = [] _a = [] for rt in rc.restypes: _a = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) _a = {name: i for i, name in enumerate(lowercase )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) _a = torch.tensor( lowercase , dtype=torch.intaa , device=protein["aatype"].device , ) _a = torch.tensor( lowercase , dtype=torch.intaa , device=protein["aatype"].device , ) _a = torch.tensor( lowercase , dtype=torch.floataa , device=protein["aatype"].device , ) _a = protein["aatype"].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein _a = restype_atomaa_to_atomaa[protein_aatype] _a = restype_atomaa_mask[protein_aatype] _a = residx_atomaa_mask _a = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back _a = restype_atomaa_to_atomaa[protein_aatype] _a = residx_atomaa_to_atomaa.long() # create the corresponding mask _a = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device ) for restype, restype_letter in enumerate(rc.restypes ): _a = rc.restype_atoa[restype_letter] _a = rc.residue_atoms[restype_name] for atom_name in atom_names: _a = rc.atom_order[atom_name] _a = 1 _a = restype_atomaa_mask[protein_aatype] _a = residx_atomaa_mask return protein def _lowerCamelCase ( lowercase : Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]: _a = tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["aatype"].device ) , lowercase , np.ndarray ) _a = tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) ) return out
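The atom-index tables built above are applied by plain integer indexing; a small sketch of that lookup pattern with hypothetical toy shapes:

import torch

# Hypothetical lookup table: 3 residue types x 4 atom slots.
lookup = torch.tensor([[0, 1, 2, 3], [3, 2, 1, 0], [0, 0, 0, 0]])
aatype = torch.tensor([2, 0, 1])  # per-residue type ids
# Same pattern as `restype_atom14_to_atom37[protein["aatype"]]` above:
per_residue = lookup[aatype]  # shape (3, 4): one row of slots per residue
print(per_residue)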
63
"""Sort the entries of the auto mappings in src/transformers/models/auto alphabetically."""
import argparse
import os
import re


PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
63
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase_ : Union[str, Any] = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Any = ['OwlViTFeatureExtractor'] lowerCAmelCase_ : List[Any] = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Tuple = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
63
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ : int = logging.get_logger(__name__) lowerCAmelCase_ : Tuple = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='big_bird' def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ): super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , ) _a = vocab_size _a = max_position_embeddings _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = initializer_range _a = type_vocab_size _a = layer_norm_eps _a = use_cache _a = rescale_embeddings _a = attention_type _a = use_bias _a = block_size _a = num_random_blocks _a = classifier_dropout class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" @property def UpperCamelCase__ ( self : Optional[int] ): if self.task == "multiple-choice": _a = {0: "batch", 1: "choice", 2: "sequence"} else: _a = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
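A hedged usage sketch for the configuration class above, assuming the public `transformers` exports `BigBirdConfig` and `BigBirdModel`:

from transformers import BigBirdConfig, BigBirdModel

# Block-sparse attention settings mirror the defaults defined above.
config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
model = BigBirdModel(config)  # randomly initialised weights, architecture shaped by the config
print(model.config.attention_type)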
63
1
"""Launch a transformers example script on a Runhouse cluster."""
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', type=str, default='ubuntu')
    parser.add_argument('--host', type=str, default='localhost')
    parser.add_argument('--key_path', type=str, default=None)
    parser.add_argument('--instance', type=str, default='V100:1')
    parser.add_argument('--provider', type=str, default='cheapest')
    parser.add_argument('--use_spot', type=bool, default=False)
    parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
            name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('/', 1)[0]

    # Set up remote environment
    cluster.install_packages(['pip:./'])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
63
'''simple docstring''' import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" @register_to_config def __init__( self : Dict , *, __a : int = 4 , __a : int = 7_68 , __a : int , __a : int , ): super().__init__() _a = nn.Parameter(torch.zeros(__a ) ) # parameters for additional clip time embeddings _a = nn.Linear(__a , __a ) _a = nn.Linear(__a , __a ) # parameters for encoder hidden states _a = clip_extra_context_tokens _a = nn.Linear( __a , self.clip_extra_context_tokens * cross_attention_dim ) _a = nn.Linear(__a , __a ) _a = nn.LayerNorm(__a ) def UpperCamelCase__ ( self : Optional[Any] , *, __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any] ): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings _a = image_embeddings.shape[0] _a = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) _a = classifier_free_guidance_embeddings.expand( __a , -1 ) _a = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] _a = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... _a = self.embedding_proj(__a ) _a = self.clip_image_embeddings_project_to_time_embeddings(__a ) _a = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" _a = self.clip_extra_context_tokens_proj(__a ) _a = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens ) _a = clip_extra_context_tokens.permute(0 , 2 , 1 ) _a = self.encoder_hidden_states_proj(__a ) _a = self.text_encoder_hidden_states_norm(__a ) _a = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
63
1
"""Convert a T5 TensorFlow checkpoint to a PyTorch model."""
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
63
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _lowerCamelCase ( lowercase : Dict ) -> Any: _a = filter(lambda lowercase : p.requires_grad , model.parameters() ) _a = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_ : int = logging.getLogger(__name__) def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any ) -> Any: if metric == "rouge2": _a = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": _a = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": _a = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' " function." ) _a = ModelCheckpoint( dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Optional[int] ) -> Union[str, Any]: return EarlyStopping( monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , ) class __SCREAMING_SNAKE_CASE (pl.Callback ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): _a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__a ) @rank_zero_only def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Tuple=True ): logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' ) _a = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results _a = Path(pl_module.hparams.output_dir ) if type_path == "test": _a = od / "test_results.txt" _a = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt' _a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=__a ) generations_file.parent.mkdir(exist_ok=__a ) with open(__a , "a+" ) as writer: for key in sorted(__a ): if key in ["log", "progress_bar", "preds"]: continue _a = metrics[key] if isinstance(__a , torch.Tensor ): _a = val.item() _a = f'{key}: {val:.6f}\n' writer.write(__a ) if not save_generations: return if "preds" in metrics: _a = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__a ) @rank_zero_only def UpperCamelCase__ ( self : int , __a : List[Any] , __a : Union[str, Any] ): try: _a = pl_module.model.model.num_parameters() except AttributeError: _a = pl_module.model.num_parameters() _a = count_trainable_parameters(__a ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} ) @rank_zero_only def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__a , __a , "test" ) @rank_zero_only def UpperCamelCase__ ( self : Any , __a : pl.Trainer , __a : int ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
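A minimal sketch of wiring the two callback factories above into a Lightning `Trainer`, assuming the standard `pytorch_lightning` API; the output path and metric name are hypothetical:

import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

# Same monitor-key convention as the helpers above ("val_" + metric).
checkpoint = ModelCheckpoint(
    dirpath="outputs", filename="{val_avg_rouge2:.4f}-{step_count}", monitor="val_rouge2", mode="max", save_top_k=3
)
early_stop = EarlyStopping(monitor="val_rouge2", mode="max", patience=3, verbose=True)
trainer = pl.Trainer(callbacks=[checkpoint, early_stop])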
63
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase_ : Union[str, Any] = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : List[str] = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Dict = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
63
"""Toy self-organizing map: winner-take-all clustering into two clusters."""
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the closer weight vector (squared Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move every component of the winning vector towards the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')


# running the main() function
if __name__ == "__main__":
    main()
63
1
"""Pure-Python recursive binary search over a sorted list."""
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
    print(f"""{target} was {not_str}found in {sequence}""")
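The same membership test can be done with the standard library; a short sketch using `bisect` on the sorted input:

import bisect


def contains(sorted_seq: list[int], target: int) -> bool:
    # bisect_left returns the insertion point; a hit sits exactly there.
    i = bisect.bisect_left(sorted_seq, target)
    return i < len(sorted_seq) and sorted_seq[i] == target


assert contains([1, 3, 5, 7], 5)
assert not contains([1, 3, 5, 7], 4)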
63
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='OwlViTImageProcessor' __a =('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )): _a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )] elif isinstance(__a , __a ) and isinstance(text[0] , __a ): _a = [] # Maximum number of queries across batch _a = max([len(__a ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__a ) != max_num_queries: _a = t + [" "] * (max_num_queries - len(__a )) _a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a ) encodings.append(__a ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": _a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) _a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) _a = BatchEncoding() _a = input_ids _a = attention_mask if query_images is not None: _a = BatchEncoding() _a = self.image_processor( __a , return_tensors=__a , **__a ).pixel_values _a = query_pixel_values if images is not None: _a = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None and images is not None: _a = image_features.pixel_values return encoding elif query_images is not None and images is not None: _a = image_features.pixel_values return encoding elif text is not None or query_images is not None: return 
encoding else: return BatchEncoding(data=dict(**__a ) , tensor_type=__a ) def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ): return self.image_processor.post_process(*__a , **__a ) def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ): return self.image_processor.post_process_object_detection(*__a , **__a ) def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ): return self.image_processor.post_process_image_guided_detection(*__a , **__a ) def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ): return self.tokenizer.batch_decode(*__a , **__a ) def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ): return self.tokenizer.decode(*__a , **__a ) @property def UpperCamelCase__ ( self : List[str] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def UpperCamelCase__ ( self : str ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
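A hedged end-to-end sketch of the processor above, following the standard OWL-ViT example from the `transformers` docs; the image URL is illustrative:

import requests
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# One list of text queries per image, as the __call__ above expects.
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
outputs = model(**inputs)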
63
1
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ : str = get_tests_dir('fixtures/test_sentencepiece.model') lowerCAmelCase_ : Optional[int] = {'target_lang': 'fi', 'source_lang': 'en'} lowerCAmelCase_ : Optional[int] = '>>zh<<' lowerCAmelCase_ : Tuple = 'Helsinki-NLP/' if is_torch_available(): lowerCAmelCase_ : int = 'pt' elif is_tf_available(): lowerCAmelCase_ : Any = 'tf' else: lowerCAmelCase_ : str = 'jax' @require_sentencepiece class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =MarianTokenizer __a =False __a =True def UpperCamelCase__ ( self : List[str] ): super().setUp() _a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] _a = dict(zip(__a , range(len(__a ) ) ) ) _a = Path(self.tmpdirname ) save_json(__a , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(__a , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(__a , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(__a , save_dir / VOCAB_FILES_NAMES["target_spm"] ) _a = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self : Optional[int] , **__a : Any ): return MarianTokenizer.from_pretrained(self.tmpdirname , **__a ) def UpperCamelCase__ ( self : int , __a : List[Any] ): return ( "This is a test", "This is a test", ) def UpperCamelCase__ ( self : Any ): _a = "</s>" _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def UpperCamelCase__ ( self : int ): _a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(__a ) , 9 ) def UpperCamelCase__ ( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def UpperCamelCase__ ( self : List[Any] ): _a = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' ) _a = en_de_tokenizer(["I am a small frog"] , return_tensors=__a ) self.assertIsInstance(__a , __a ) _a = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(__a , batch.input_ids[0] ) _a = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(__a ) _a = [x.name for x in Path(__a ).glob("*" )] self.assertIn("source.spm" , __a ) MarianTokenizer.from_pretrained(__a ) def UpperCamelCase__ ( self : List[str] ): _a = self.get_tokenizer() _a = tok( ["I am a small frog" * 10_00, "I am a small frog"] , padding=__a , truncation=__a , return_tensors=__a ) self.assertIsInstance(__a , __a ) self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def UpperCamelCase__ ( self : str ): _a = self.get_tokenizer() _a = tok(["I am a tiny frog", "I am a small frog"] , padding=__a , return_tensors=__a ) self.assertIsInstance(__a , __a ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def UpperCamelCase__ ( self : List[Any] ): # fmt: off _a = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 
52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def UpperCamelCase__ ( self : List[Any] ): _a = 
MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) _a = "Tämä on testi" _a = "This is a test" _a = [76, 7, 20_47, 2] _a = [69, 12, 11, 9_40, 2] _a = tokenizer(__a ).input_ids self.assertListEqual(__a , __a ) _a = tokenizer(text_target=__a ).input_ids self.assertListEqual(__a , __a ) _a = tokenizer.decode(__a , skip_special_tokens=__a ) self.assertEqual(__a , __a )
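For orientation, a minimal round trip with the tokenizer class under test, assuming the public `transformers` Marian classes and a published checkpoint:

from transformers import MarianMTModel, MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

batch = tokenizer(["I am a small frog"], return_tensors="pt", padding=True)
translated = model.generate(**batch)
print(tokenizer.batch_decode(translated, skip_special_tokens=True))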
63
"""Terms of the harmonic series 1 + 1/2 + 1/3 + ... + 1/n, as strings."""


def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
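For an exact numeric counterpart to the string terms above, a small sketch with the standard-library `fractions.Fraction`:

from fractions import Fraction


def harmonic_sum(n: int) -> Fraction:
    # Exact partial sum H_n = 1 + 1/2 + ... + 1/n.
    return sum(Fraction(1, k) for k in range(1, n + 1))


print(harmonic_sum(4))  # 25/12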
63
1
"""Project Euler problem 8: greatest product of thirteen adjacent digits."""
from functools import reduce

N = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"""{solution() = }""")
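An equivalent windowed-product formulation without `reduce`, sketched with `math.prod` (Python 3.8+); the short digit string is a hypothetical test case:

import math


def max_window_product(digits: str, width: int = 13) -> int:
    # Product of each `width`-digit window; maximum over all windows.
    return max(math.prod(int(c) for c in digits[i : i + width]) for i in range(len(digits) - width + 1))


assert max_window_product("3675356291", 4) == 630  # 3 * 6 * 7 * 5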
63
'''simple docstring'''

import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
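A toy, self-contained sketch of the special-token masking idiom used in `mask_with_collator` above; the token ids and the tiny batch are made up for illustration, not taken from any real tokenizer.

import tensorflow as tf

# Hypothetical ids: 1 = CLS, 2 = SEP, 0 = PAD.
input_ids = tf.constant([[1, 17, 23, 2, 0, 0]])
attention_mask = tf.constant([[1, 1, 1, 1, 0, 0]])
cls_token_id, sep_token_id = 1, 2

# Padding, CLS and SEP positions must never be selected for MLM masking,
# so mark them True in the special-tokens mask.
special_tokens_mask = (
    ~tf.cast(attention_mask, tf.bool)
    | (input_ids == cls_token_id)
    | (input_ids == sep_token_id)
)
print(special_tokens_mask.numpy())  # [[ True False False  True  True  True]]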
63
1
'''simple docstring'''

from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
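A brief sketch of what the `_LazyModule` indirection above buys, assuming an installed `transformers` with Informer support; the heavy `modeling_informer` submodule is only imported when one of its names is first touched, so config-only code paths stay cheap. The `prediction_length` argument is an assumption based on the time-series config family.

# Importing the config does not pull in any torch model code yet.
from transformers import InformerConfig

config = InformerConfig(prediction_length=24)  # hypothetical setting for illustration
print(config.prediction_length)  # 24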
63
'''simple docstring'''

import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
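A hedged usage sketch for the processor above, assuming a local document image (`invoice.png` is a hypothetical path), the public `microsoft/layoutlmv3-base` checkpoint, and `pytesseract` installed so the default `apply_ocr=True` path can extract words and boxes itself.

from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")

image = Image.open("invoice.png").convert("RGB")  # hypothetical input document
encoding = processor(image, return_tensors="pt")
# With apply_ocr=True, no text/boxes are passed in; OCR fills them.
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values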
63
1
'''simple docstring'''


def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, optionally rounded to digit_amount places."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
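A few sanity checks for `decimal_isolate`, using `math.isclose` because the subtraction happens in binary floating point; note that `int()` truncates toward zero, so the isolated fraction keeps the input's sign.

import math

assert math.isclose(decimal_isolate(35.345, 1), 0.3)
assert math.isclose(decimal_isolate(-14.789, 3), -0.789)
assert decimal_isolate(0, 2) == 0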
63
'''simple docstring'''

from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Wraps an existing text-model config and records the modal encoder's hidden size."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
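A minimal usage sketch for the wrapper above, under the assumption (from the upstream deprecated MMBT model, whose config has this exact shape) that it is constructed from an existing text-model config; `SimpleNamespace` stands in for a real `BertConfig` here.

from types import SimpleNamespace

text_config = SimpleNamespace(hidden_size=768, vocab_size=30522)  # hypothetical stand-in

mmbt_config = MMBTConfig(text_config, num_labels=2)
print(mmbt_config.hidden_size, mmbt_config.modal_hidden_size, mmbt_config.num_labels)
# 768 2048 2

Note that because the constructor assigns `config.__dict__` directly, the wrapper shares the wrapped config's attribute dictionary, so later assignments such as `modal_hidden_size` also appear on `text_config`.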
63
1
'''simple docstring'''

from __future__ import annotations


def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """
    Solve shear stress = tangential force / area for the missing quantity.
    Exactly one of the three arguments must be 0; that is the one solved for.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
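Two worked calls for `shear_stress`: leaving exactly one argument at 0 selects which quantity is solved for, e.g. stress = 25 N / 5 m² = 5 Pa.

print(shear_stress(stress=0, tangential_force=25, area=5))
# ('stress', 5.0)
print(shear_stress(stress=25, tangential_force=100, area=0))
# ('area', 4.0)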
63
'''simple docstring'''


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
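The O(n) loop above can be cross-checked against the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6; a constant-time sketch (the function name `solution_closed_form` is ours, not part of the original file):

def solution_closed_form(n: int = 100) -> int:
    # Square of the sum minus the sum of the squares, in O(1).
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution_closed_form(10) == 2640
assert solution_closed_form(100) == 25164150  # agrees with solution()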
63
1