code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
def a__ ( A__, A__ ): return int((input_a, input_a).count(1 ) != 0 ) def a__ ( ): assert or_gate(0, 0 ) == 0 assert or_gate(0, 1 ) == 1 assert or_gate(1, 0 ) == 1 assert or_gate(1, 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
101
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, 
wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
101
1
"""simple docstring""" import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''') # TF training parameters __lowercase = False __lowercase = False def lowerCAmelCase (__UpperCamelCase : Namespace ): """simple docstring""" return TrainCommand(__UpperCamelCase ) class _lowercase ( __a ): """simple docstring""" @staticmethod def UpperCAmelCase_ ( UpperCamelCase__ : ArgumentParser ) -> Optional[int]: '''simple docstring''' __UpperCamelCase =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' ) train_parser.add_argument( '''--train_data''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , ) train_parser.add_argument( '''--column_label''' , type=UpperCamelCase__ , default=0 , help='''Column of the dataset csv file with example labels.''' ) train_parser.add_argument( '''--column_text''' , type=UpperCamelCase__ , default=1 , help='''Column of the dataset csv file with example texts.''' ) train_parser.add_argument( '''--column_id''' , type=UpperCamelCase__ , default=2 , help='''Column of the dataset csv file with example ids.''' ) train_parser.add_argument( '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' ) train_parser.add_argument('''--validation_data''' , type=UpperCamelCase__ , default='''''' , help='''path to validation dataset.''' ) train_parser.add_argument( '''--validation_split''' , type=UpperCamelCase__ , default=0.1 , help='''if validation dataset is not provided, fraction of train 
dataset to use as validation dataset.''' , ) train_parser.add_argument('''--output''' , type=UpperCamelCase__ , default='''./''' , help='''path to saved the trained model.''' ) train_parser.add_argument( '''--task''' , type=UpperCamelCase__ , default='''text_classification''' , help='''Task to train the model on.''' ) train_parser.add_argument( '''--model''' , type=UpperCamelCase__ , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' ) train_parser.add_argument('''--train_batch_size''' , type=UpperCamelCase__ , default=32 , help='''Batch size for training.''' ) train_parser.add_argument('''--valid_batch_size''' , type=UpperCamelCase__ , default=64 , help='''Batch size for validation.''' ) train_parser.add_argument('''--learning_rate''' , type=UpperCamelCase__ , default=3E-5 , help='''Learning rate.''' ) train_parser.add_argument('''--adam_epsilon''' , type=UpperCamelCase__ , default=1E-08 , help='''Epsilon for Adam optimizer.''' ) train_parser.set_defaults(func=UpperCamelCase__ ) def __init__( self : Union[str, Any] , UpperCamelCase__ : Namespace ) -> Optional[int]: '''simple docstring''' __UpperCamelCase =logging.get_logger('''transformers-cli/training''' ) __UpperCamelCase ='''tf''' if is_tf_available() else '''torch''' os.makedirs(args.output , exist_ok=UpperCamelCase__ ) __UpperCamelCase =args.output __UpperCamelCase =args.column_label __UpperCamelCase =args.column_text __UpperCamelCase =args.column_id self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" ) if args.task == "text_classification": __UpperCamelCase =TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f"""Loading dataset from {args.train_data}""" ) __UpperCamelCase =Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id 
, skip_first_row=args.skip_first_row , ) __UpperCamelCase =None if args.validation_data: self.logger.info(f"""Loading validation dataset from {args.validation_data}""" ) __UpperCamelCase =Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) __UpperCamelCase =args.validation_split __UpperCamelCase =args.train_batch_size __UpperCamelCase =args.valid_batch_size __UpperCamelCase =args.learning_rate __UpperCamelCase =args.adam_epsilon def UpperCAmelCase_ ( self : Tuple ) -> Tuple: '''simple docstring''' if self.framework == "tf": return self.run_tf() return self.run_torch() def UpperCAmelCase_ ( self : Any ) -> int: '''simple docstring''' raise NotImplementedError def UpperCAmelCase_ ( self : Optional[int] ) -> Any: '''simple docstring''' self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
296
"""simple docstring""" from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) __lowercase = 299_792_458 # Symbols __lowercase , __lowercase , __lowercase , __lowercase = symbols('''ct x y z''') def lowerCAmelCase (__UpperCamelCase : float ): """simple docstring""" if velocity > c: raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('''Speed must be greater than or equal to 1!''' ) return velocity / c def lowerCAmelCase (__UpperCamelCase : float ): """simple docstring""" return 1 / sqrt(1 - beta(__UpperCamelCase ) ** 2 ) def lowerCAmelCase (__UpperCamelCase : float ): """simple docstring""" return np.array( [ [gamma(__UpperCamelCase ), -gamma(__UpperCamelCase ) * beta(__UpperCamelCase ), 0, 0], [-gamma(__UpperCamelCase ) * beta(__UpperCamelCase ), gamma(__UpperCamelCase ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def lowerCAmelCase (__UpperCamelCase : float , __UpperCamelCase : np.ndarray | None = None ): """simple docstring""" if event is None: __UpperCamelCase =np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(__UpperCamelCase ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: __lowercase = transform(29_979_245) print('''Example of four vector: ''') print(f'''ct\' = {four_vector[0]}''') print(f'''x\' = {four_vector[1]}''') print(f'''y\' = {four_vector[2]}''') print(f'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values __lowercase = {ct: c, x: 1, y: 1, z: 1} __lowercase = [four_vector[i].subs(sub_dict) for i in range(4)] print(f'''\n{numerical_vector}''')
296
1
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=2 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=36 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=6 , __UpperCAmelCase=6 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=1000 , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = text_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids 
__lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = coordinate_size __lowerCamelCase = shape_size __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __lowerCamelCase = text_seq_length __lowerCamelCase = (image_size // patch_size) ** 2 + 1 __lowerCamelCase = self.text_seq_length + self.image_seq_length def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowerCamelCase = bbox[i, j, 3] __lowerCamelCase = bbox[i, j, 1] __lowerCamelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: __lowerCamelCase = bbox[i, j, 2] __lowerCamelCase = bbox[i, j, 0] __lowerCamelCase = t __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = 
ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __lowerCamelCase = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = LayoutLMvaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # text + image __lowerCamelCase = model(__UpperCAmelCase , pixel_values=__UpperCAmelCase ) __lowerCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only 
__lowerCamelCase = model(pixel_values=__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = LayoutLMvaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = LayoutLMvaForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = LayoutLMvaForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , 
end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( {"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel} if is_torch_available() else {} ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = LayoutLMvaModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__UpperCAmelCase , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) elif model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) elif model_class in [ *get_values(__UpperCAmelCase ), ]: __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) elif model_class in [ *get_values(__UpperCAmelCase ), ]: __lowerCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__UpperCAmelCase , ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: 
__lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = LayoutLMvaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def a__ ( ): __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) if is_vision_available() else None @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(__UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).pixel_values.to(__UpperCAmelCase ) __lowerCamelCase = torch.tensor([[1, 2]] ) __lowerCamelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass __lowerCamelCase = model( input_ids=input_ids.to(__UpperCAmelCase ) , bbox=bbox.to(__UpperCAmelCase ) , pixel_values=pixel_values.to(__UpperCAmelCase ) , ) # verify the logits __lowerCamelCase = torch.Size((1, 199, 768) ) 
self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
175
import math def a__ ( _UpperCamelCase : float ,_UpperCamelCase : float ): if ( not isinstance(_UpperCamelCase ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * power_factor def a__ ( _UpperCamelCase : float ,_UpperCamelCase : float ): if ( not isinstance(_UpperCamelCase ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
175
1
import inspect import unittest class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ) -> Any: try: import diffusers # noqa: F401 except ImportError: assert False def _UpperCAmelCase ( self ) -> List[str]: import diffusers from diffusers.dependency_versions_table import deps A = inspect.getmembers(a__ , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": A = """k-diffusion""" elif backend == "invisible_watermark": A = """invisible-watermark""" assert backend in deps, f'{backend} is not in the deps table!'
546
import math def _lowerCAmelCase ( UpperCamelCase__: int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowerCAmelCase ( UpperCamelCase__: float = 0.1 ) -> int: """simple docstring""" A = 3 A = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(UpperCamelCase__ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
546
1
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable a_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""DPTFeatureExtractor"""] a_ = ["""DPTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DPTForDepthEstimation""", """DPTForSemanticSegmentation""", """DPTModel""", """DPTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
177
"""simple docstring""" def __lowercase ( snake_case_ : int ) ->int: '''simple docstring''' __A : str = 1 for i in range(1 ,num + 1 ): fact *= i return fact def __lowercase ( snake_case_ : int ) ->int: '''simple docstring''' __A : str = 0 while number > 0: __A : Optional[int] = number % 10 sum_of_digits += last_digit __A : Dict = number // 10 # Removing the last_digit from the given number return sum_of_digits def __lowercase ( snake_case_ : int = 100 ) ->int: '''simple docstring''' __A : Optional[int] = factorial(snake_case_ ) __A : Optional[Any] = split_and_add(snake_case_ ) return result if __name__ == "__main__": print(solution(int(input("""Enter the Number: """).strip())))
177
1
from __future__ import annotations from random import random from typing import Generic, TypeVar UpperCamelCase_ = TypeVar("KT") UpperCamelCase_ = TypeVar("VT") class a ( Generic[KT, VT] ): def __init__( self : Optional[Any] , snake_case__ : KT | str = "root" , snake_case__ : VT | None = None ): """simple docstring""" __lowerCAmelCase = key __lowerCAmelCase = value __lowerCAmelCase = [] def __repr__( self : Tuple ): """simple docstring""" return F"Node({self.key}: {self.value})" @property def UpperCAmelCase__ ( self : str ): """simple docstring""" return len(self.forward ) class a ( Generic[KT, VT] ): def __init__( self : str , snake_case__ : float = 0.5 , snake_case__ : int = 16 ): """simple docstring""" __lowerCAmelCase = Node[KT, VT]() __lowerCAmelCase = 0 __lowerCAmelCase = p __lowerCAmelCase = max_level def __str__( self : Any ): """simple docstring""" __lowerCAmelCase = list(self ) if len(snake_case__ ) == 0: return F"SkipList(level={self.level})" __lowerCAmelCase = max((len(str(snake_case__ ) ) for item in items) , default=4 ) __lowerCAmelCase = max(snake_case__ , 4 ) + 4 __lowerCAmelCase = self.head __lowerCAmelCase = [] __lowerCAmelCase = node.forward.copy() lines.append(F"[{node.key}]".ljust(snake_case__ , "-" ) + "* " * len(snake_case__ ) ) lines.append(" " * label_size + "| " * len(snake_case__ ) ) while len(node.forward ) != 0: __lowerCAmelCase = node.forward[0] lines.append( F"[{node.key}]".ljust(snake_case__ , "-" ) + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) ) lines.append(" " * label_size + "| " * len(snake_case__ ) ) __lowerCAmelCase = node.forward lines.append("None".ljust(snake_case__ ) + "* " * len(snake_case__ ) ) return F"SkipList(level={self.level})\n" + "\n".join(snake_case__ ) def __iter__( self : List[Any] ): """simple docstring""" __lowerCAmelCase = self.head while len(node.forward ) != 0: yield node.forward[0].key __lowerCAmelCase = node.forward[0] def UpperCAmelCase__ ( self : Optional[int] ): """simple 
docstring""" __lowerCAmelCase = 1 while random() < self.p and level < self.max_level: level += 1 return level def UpperCAmelCase__ ( self : List[Any] , snake_case__ : int ): """simple docstring""" __lowerCAmelCase = [] __lowerCAmelCase = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: __lowerCAmelCase = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(snake_case__ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def UpperCAmelCase__ ( self : List[str] , snake_case__ : KT ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self._locate_node(snake_case__ ) if node is not None: for i, update_node in enumerate(snake_case__ ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __lowerCAmelCase = node.forward[i] else: __lowerCAmelCase = update_node.forward[:i] def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : KT , snake_case__ : VT ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self._locate_node(snake_case__ ) if node is not None: __lowerCAmelCase = value else: __lowerCAmelCase = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. 
for _ in range(self.level - 1 , snake_case__ ): update_vector.append(self.head ) __lowerCAmelCase = level __lowerCAmelCase = Node(snake_case__ , snake_case__ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(snake_case__ ) else: __lowerCAmelCase = new_node def UpperCAmelCase__ ( self : str , snake_case__ : VT ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self._locate_node(snake_case__ ) if node is not None: return node.value return None def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = SkipList() skip_list.insert("Key1" , 3 ) skip_list.insert("Key2" , 1_2 ) skip_list.insert("Key3" , 4_1 ) skip_list.insert("Key4" , -1_9 ) __lowerCAmelCase = skip_list.head __lowerCAmelCase = {} while node.level != 0: __lowerCAmelCase = node.forward[0] __lowerCAmelCase = node.value assert len(UpperCamelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 1_2 assert all_values["Key3"] == 4_1 assert all_values["Key4"] == -1_9 def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = SkipList() skip_list.insert("Key1" , 1_0 ) skip_list.insert("Key1" , 1_2 ) skip_list.insert("Key5" , 7 ) skip_list.insert("Key7" , 1_0 ) skip_list.insert("Key10" , 5 ) skip_list.insert("Key7" , 7 ) skip_list.insert("Key5" , 5 ) skip_list.insert("Key10" , 1_0 ) __lowerCAmelCase = skip_list.head __lowerCAmelCase = {} while node.level != 0: __lowerCAmelCase = node.forward[0] __lowerCAmelCase = node.value if len(UpperCamelCase ) != 4: print() assert len(UpperCamelCase ) == 4 assert all_values["Key1"] == 1_2 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 1_0 def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = SkipList() assert skip_list.find("Some key" ) is None def _UpperCAmelCase ( ): """simple docstring""" 
__lowerCAmelCase = SkipList() skip_list.insert("Key2" , 2_0 ) assert skip_list.find("Key2" ) == 2_0 skip_list.insert("Some Key" , 1_0 ) skip_list.insert("Key2" , 8 ) skip_list.insert("V" , 1_3 ) assert skip_list.find("Y" ) is None assert skip_list.find("Key2" ) == 8 assert skip_list.find("Some Key" ) == 1_0 assert skip_list.find("V" ) == 1_3 def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = SkipList() skip_list.delete("Some key" ) assert len(skip_list.head.forward ) == 0 def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = SkipList() skip_list.insert("Key1" , 1_2 ) skip_list.insert("V" , 1_3 ) skip_list.insert("X" , 1_4 ) skip_list.insert("Key2" , 1_5 ) skip_list.delete("V" ) skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("Key2" ) is None def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = SkipList() skip_list.insert("Key1" , 1_2 ) skip_list.insert("V" , 1_3 ) skip_list.insert("X" , 1_4 ) skip_list.insert("Key2" , 1_5 ) skip_list.delete("V" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) == 1_4 assert skip_list.find("Key1" ) == 1_2 assert skip_list.find("Key2" ) == 1_5 skip_list.delete("X" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) == 1_2 assert skip_list.find("Key2" ) == 1_5 skip_list.delete("Key1" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) == 1_5 skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) is None def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = SkipList() skip_list.insert("Key1" , 1_2 ) skip_list.insert("V" , 1_3 ) skip_list.insert("X" , 1_4_2 ) skip_list.insert("Key2" , 1_5 ) skip_list.delete("X" ) def traverse_keys(UpperCamelCase: Union[str, Any] ): yield 
node.key for forward_node in node.forward: yield from traverse_keys(UpperCamelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def _UpperCAmelCase ( ): """simple docstring""" def is_sorted(UpperCamelCase: Union[str, Any] ): return all(next_item >= item for item, next_item in zip(UpperCamelCase , lst[1:] ) ) __lowerCAmelCase = SkipList() for i in range(1_0 ): skip_list.insert(UpperCamelCase , UpperCamelCase ) assert is_sorted(list(UpperCamelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(UpperCamelCase ) ) skip_list.insert(-1_2 , -1_2 ) skip_list.insert(7_7 , 7_7 ) assert is_sorted(list(UpperCamelCase ) ) def _UpperCAmelCase ( ): """simple docstring""" for _ in range(1_0_0 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = SkipList() skip_list.insert(2 , "2" ) skip_list.insert(4 , "4" ) skip_list.insert(6 , "4" ) skip_list.insert(4 , "5" ) skip_list.insert(8 , "4" ) skip_list.insert(9 , "4" ) skip_list.delete(4 ) print(UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
376
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class a : def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any]=2 , snake_case__ : Any=3 , snake_case__ : str=4 , snake_case__ : Tuple=2 , snake_case__ : Optional[Any]=7 , snake_case__ : int=True , snake_case__ : Dict=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Union[str, Any]=99 , snake_case__ : str=36 , snake_case__ : Tuple=2 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[Any]=37 , snake_case__ : int="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : Optional[Any]=512 , snake_case__ : Union[str, Any]=16 , snake_case__ : Optional[int]=2 , snake_case__ : Any=0.0_2 , snake_case__ : List[str]=6 , snake_case__ : Optional[int]=6 , snake_case__ : Any=3 , snake_case__ : str=4 , snake_case__ : int=None , snake_case__ : Any=1_000 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size 
__lowerCAmelCase = num_channels __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = is_training __lowerCAmelCase = use_input_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = coordinate_size __lowerCAmelCase = shape_size __lowerCAmelCase = num_labels __lowerCAmelCase = num_choices __lowerCAmelCase = scope __lowerCAmelCase = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __lowerCAmelCase = text_seq_length __lowerCAmelCase = (image_size // patch_size) ** 2 + 1 __lowerCAmelCase = self.text_seq_length + self.image_seq_length def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __lowerCAmelCase = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowerCAmelCase = bbox[i, j, 3] __lowerCAmelCase = bbox[i, j, 1] __lowerCAmelCase = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __lowerCAmelCase = bbox[i, j, 2] __lowerCAmelCase = bbox[i, j, 0] __lowerCAmelCase = tmp_coordinate __lowerCAmelCase = tf.constant(snake_case__ ) __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_input_mask: __lowerCAmelCase = 
random_attention_mask([self.batch_size, self.text_seq_length] ) __lowerCAmelCase = None if self.use_token_type_ids: __lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __lowerCAmelCase = None __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __lowerCAmelCase = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Tuple ): """simple docstring""" __lowerCAmelCase = TFLayoutLMvaModel(config=snake_case__ ) # text + image __lowerCAmelCase = model(snake_case__ , pixel_values=snake_case__ , training=snake_case__ ) __lowerCAmelCase = model( snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , training=snake_case__ , ) __lowerCAmelCase = model(snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only 
__lowerCAmelCase = model(snake_case__ , training=snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __lowerCAmelCase = model({"pixel_values": pixel_values} , training=snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Optional[int] ): """simple docstring""" __lowerCAmelCase = self.num_labels __lowerCAmelCase = TFLayoutLMvaForSequenceClassification(config=snake_case__ ) __lowerCAmelCase = model( snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple ): """simple docstring""" __lowerCAmelCase = self.num_labels __lowerCAmelCase = TFLayoutLMvaForTokenClassification(config=snake_case__ ) __lowerCAmelCase = model( snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Tuple , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[Any] ): """simple docstring""" __lowerCAmelCase = 2 __lowerCAmelCase = TFLayoutLMvaForQuestionAnswering(config=snake_case__ ) 
__lowerCAmelCase = model( snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , training=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) = config_and_inputs __lowerCAmelCase = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class a ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): lowercase_ : List[Any] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase_ : Optional[int] = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase_ : Dict = False lowercase_ : str = False lowercase_ : Any = False def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : int ): """simple docstring""" return True def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Tuple=False ): """simple docstring""" __lowerCAmelCase = copy.deepcopy(snake_case__ ) if model_class in get_values(snake_case__ ): __lowerCAmelCase = { k: tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 
1) ) if isinstance(snake_case__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(snake_case__ ): __lowerCAmelCase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(snake_case__ ): __lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(snake_case__ ): __lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(snake_case__ ): __lowerCAmelCase = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __lowerCAmelCase = TFLayoutLMvaModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(snake_case__ ) if getattr(snake_case__ , "hf_compute_loss" , snake_case__ ): # The number of elements in the loss should be the same as the number of elements in the label __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) __lowerCAmelCase = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=snake_case__ )[0] ] __lowerCAmelCase = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) __lowerCAmelCase = prepared_for_class.pop("input_ids" ) __lowerCAmelCase = model(snake_case__ , 
**snake_case__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) __lowerCAmelCase = prepared_for_class.pop("input_ids" ) if "labels" in prepared_for_class: __lowerCAmelCase = prepared_for_class["labels"].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __lowerCAmelCase = -100 __lowerCAmelCase = tf.convert_to_tensor(snake_case__ ) __lowerCAmelCase = model(snake_case__ , **snake_case__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) __lowerCAmelCase = model(snake_case__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ ) # Get keys that were added with the _prepare_for_class function __lowerCAmelCase = prepared_for_class.keys() - inputs_dict.keys() __lowerCAmelCase = inspect.signature(model.call ).parameters __lowerCAmelCase = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __lowerCAmelCase = {0: "input_ids"} for label_key in label_keys: __lowerCAmelCase = signature_names.index(snake_case__ ) __lowerCAmelCase = label_key __lowerCAmelCase = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __lowerCAmelCase = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in 
sorted_tuple_index_mapping: __lowerCAmelCase = prepared_for_class[value] __lowerCAmelCase = tuple(snake_case__ ) # Send to model __lowerCAmelCase = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCAmelCase = type self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self : str ): """simple docstring""" ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) @slow def UpperCAmelCase__ ( self : int ): """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = TFLayoutLMvaModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf class a ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=snake_case__ ) if is_vision_available() else None @slow def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __lowerCAmelCase = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=snake_case__ , return_tensors="tf" ).pixel_values __lowerCAmelCase = tf.constant([[1, 2]] ) __lowerCAmelCase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __lowerCAmelCase = model(input_ids=snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ ) # verify the logits __lowerCAmelCase = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , snake_case__ ) __lowerCAmelCase = tf.constant( 
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1E-4 ) )
376
1
"""Convert original PoolFormer checkpoints to the HuggingFace format.

Renames the original state-dict keys, loads them into a
``PoolFormerForImageClassification``, sanity-checks the logits on a COCO test
image, and saves model + image processor.
"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Rewrite ``{block}.{layer}.{original_name}`` into the HF naming scheme,
    shifting the block index down by ``offset`` (embeddings consumed earlier
    in the flat numbering are not blocks in the HF layout)."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    # Block and layer indices sit immediately before the matched component.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )
    return key


def rename_keys(state_dict):
    """Return a new OrderedDict with every original key mapped to HF naming.

    NOTE: in the previous revision all helpers shared one mangled name, so the
    calls to ``replace_key_with_offset`` below raised NameError.
    """
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Fetch the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert one checkpoint; ``model_name`` ends in the size tag (s12..m48)."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    # NOTE(review): the original tail was truncated mid-statement; restored
    # with the conventional parse-and-dispatch — confirm against upstream.
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
) __snake_case : str = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
215
'''simple docstring''' import operator as op __snake_case : Optional[int] = 'scaler.pt' __snake_case : str = 'pytorch_model' __snake_case : Optional[int] = 'random_states' __snake_case : Tuple = 'optimizer' __snake_case : Tuple = 'scheduler' __snake_case : List[Any] = 'pytorch_model.bin' __snake_case : str = 'pytorch_model.bin.index.json' __snake_case : Optional[Any] = 'model.safetensors' __snake_case : Dict = 'model.safetensors.index.json' __snake_case : str = '1.10.2' __snake_case : List[Any] = 'py38' __snake_case : str = '4.17.0' __snake_case : Tuple = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge'] __snake_case : Dict = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2'] __snake_case : Any = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP'] __snake_case : List[str] = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH'] __snake_case : str = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] __snake_case : Union[str, Any] = '2.0.1' __snake_case : Dict = ['pdsh', 'standard', 'openmpi', 'mvapich'] __snake_case : Dict = ['default', 'reduce-overhead', 'max-autotune'] __snake_case : Dict = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 __snake_case : Dict = [ 'nnodes', 'nproc_per_node', 'rdzv_backend', 'rdzv_endpoint', 'rdzv_id', 'rdzv_conf', 'standalone', 'max_restarts', 'monitor_interval', 'start_method', 'role', 'module', 'm', 'no_python', 'run_path', 'log_dir', 'r', 'redirects', 't', 'tee', 'node_rank', 'master_addr', 'master_port', ] __snake_case : str = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM'] __snake_case : List[str] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
215
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter

# Help text for the `accelerate config default` subcommand.
SCREAMING_SNAKE_CASE = "Create a default config file for Accelerate with only a few flags set."


def snake_case__ ( __SCREAMING_SNAKE_CASE="no" , __SCREAMING_SNAKE_CASE = default_json_config_file , __SCREAMING_SNAKE_CASE = False ) -> Optional[Any]:
    """Write a minimal Accelerate cluster config for the local machine.

    Probes CUDA, then XPU (if requested), then NPU to pick the device count
    and distributed type; refuses to overwrite an existing config file.
    Returns the config path on success, ``False`` if the file already exists.
    Raises ``ValueError`` for an unknown mixed-precision choice.

    NOTE(review): local bindings are obfuscated (everything is assigned to
    ``UpperCAmelCase_``) while later statements reference ``path``,
    ``mixed_precision``, ``num_gpus`` etc. — verify against upstream.
    """
    UpperCAmelCase_ = Path(__SCREAMING_SNAKE_CASE )
    path.parent.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
    if path.exists():
        print(
            f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
        return False
    UpperCAmelCase_ = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
    UpperCAmelCase_ = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    # Device probing order: CUDA first, then XPU (only when asked), then NPU.
    if torch.cuda.is_available():
        UpperCAmelCase_ = torch.cuda.device_count()
        UpperCAmelCase_ = num_gpus
        UpperCAmelCase_ = False
        if num_gpus > 1:
            UpperCAmelCase_ = "MULTI_GPU"
        else:
            UpperCAmelCase_ = "NO"
    elif is_xpu_available() and use_xpu:
        UpperCAmelCase_ = torch.xpu.device_count()
        UpperCAmelCase_ = num_xpus
        UpperCAmelCase_ = False
        if num_xpus > 1:
            UpperCAmelCase_ = "MULTI_XPU"
        else:
            UpperCAmelCase_ = "NO"
    elif is_npu_available():
        UpperCAmelCase_ = torch.npu.device_count()
        UpperCAmelCase_ = num_npus
        UpperCAmelCase_ = False
        if num_npus > 1:
            UpperCAmelCase_ = "MULTI_NPU"
        else:
            UpperCAmelCase_ = "NO"
    else:
        # CPU-only fallback.
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = True
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = "NO"
    UpperCAmelCase_ = ClusterConfig(**__SCREAMING_SNAKE_CASE )
    config.to_json_file(__SCREAMING_SNAKE_CASE )
    return path


def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
    """Register the ``default`` subcommand and its CLI arguments on *parser*."""
    UpperCAmelCase_ = parser.add_parser("default" , parents=__SCREAMING_SNAKE_CASE , help=__SCREAMING_SNAKE_CASE , formatter_class=__SCREAMING_SNAKE_CASE )
    parser.add_argument(
        "--config_file" ,
        default=__SCREAMING_SNAKE_CASE ,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) ,
        dest="save_location" ,
    )
    parser.add_argument(
        "--mixed_precision" ,
        choices=["no", "fp16", "bf16"] ,
        type=__SCREAMING_SNAKE_CASE ,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." ,
        default="no" ,
    )
    parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
    return parser


def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    """Entry point for ``accelerate config default``: write the config and report."""
    UpperCAmelCase_ = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f'''accelerate configuration saved at {config_file}''' )
706
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class lowerCamelCase ( lowercase__, lowercase__ ): '''simple docstring''' @register_to_config def __init__( self , lowerCAmelCase = 768 , ): super().__init__() UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) ) UpperCAmelCase_ = nn.Parameter(torch.ones(1 , lowerCAmelCase ) ) def A__ ( self , lowerCAmelCase = None , lowerCAmelCase = None , ): UpperCAmelCase_ = nn.Parameter(self.mean.to(lowerCAmelCase ).to(lowerCAmelCase ) ) UpperCAmelCase_ = nn.Parameter(self.std.to(lowerCAmelCase ).to(lowerCAmelCase ) ) return self def A__ ( self , lowerCAmelCase ): UpperCAmelCase_ = (embeds - self.mean) * 1.0 / self.std return embeds def A__ ( self , lowerCAmelCase ): UpperCAmelCase_ = (embeds * self.std) + self.mean return embeds
23
0
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # Unit tests for the local ``lib`` Vector/Matrix implementation.
    # NOTE(review): every test method shares the name ``a``, so only the last
    # definition survives on the class; verify intended names upstream.
    def a (self : List[Any] ):
        """Vector component access."""
        __snake_case = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        __snake_case = Vector()

    def a (self : Optional[int] ):
        """Vector string representation."""
        __snake_case = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(_lowercase ) , '''(0,0,0,0,0,1)''' )

    def a (self : List[Any] ):
        """Vector length (size)."""
        __snake_case = Vector([1, 2, 3, 4] )
        self.assertEqual(len(_lowercase ) , 4 )

    def a (self : Union[str, Any] ):
        """Euclidean length of vectors, including the zero vector."""
        __snake_case = Vector([1, 2] )
        __snake_case = Vector([1, 2, 3, 4, 5] )
        __snake_case = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        __snake_case = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )

    def a (self : int ):
        """Vector addition."""
        __snake_case = Vector([1, 2, 3] )
        __snake_case = Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )

    def a (self : Dict ):
        """Vector subtraction."""
        __snake_case = Vector([1, 2, 3] )
        __snake_case = Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )

    def a (self : List[Any] ):
        """Scalar multiplication and dot product."""
        __snake_case = Vector([1, 2, 3] )
        __snake_case = Vector([2, -1, 4] )  # for test of dot product
        __snake_case = Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
        self.assertEqual((a * b) , 0 )

    def a (self : Any ):
        """zero_vector produces all-zero components."""
        self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )

    def a (self : List[str] ):
        """unit_basis_vector places the 1 at the requested index."""
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )

    def a (self : Dict ):
        """axpy computes a*x + y."""
        __snake_case = Vector([1, 2, 3] )
        __snake_case = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , _lowercase , _lowercase ) ) , '''(3,4,7)''' )

    def a (self : Optional[int] ):
        """copy returns an equal vector."""
        __snake_case = Vector([1, 0, 0, 0, 0, 0] )
        __snake_case = x.copy()
        self.assertEqual(str(_lowercase ) , str(_lowercase ) )

    def a (self : int ):
        """change_component mutates vector entries in place."""
        __snake_case = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(_lowercase ) , '''(0,1,0)''' )

    def a (self : List[Any] ):
        """Matrix string representation."""
        __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(_lowercase ) )

    def a (self : Dict ):
        """Matrix of minors."""
        __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __snake_case = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(_lowercase , _lowercase ) )

    def a (self : str ):
        """Matrix of cofactors (minors with alternating signs)."""
        __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __snake_case = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(_lowercase , _lowercase ) )

    def a (self : str ):
        """Matrix determinant."""
        __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )

    def a (self : List[str] ):
        """Matrix-vector product and scalar multiplication."""
        __snake_case = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        __snake_case = Vector([1, 2, 3] )
        self.assertEqual('''(14,32,50)''' , str(a * x ) )
        self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )

    def a (self : Any ):
        """Matrix change_component mutates entries in place."""
        __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(_lowercase ) )

    def a (self : Dict ):
        """Matrix component access."""
        __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )

    def a (self : Optional[int] ):
        """Matrix addition."""
        __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __snake_case = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )

    def a (self : List[Any] ):
        """Matrix subtraction."""
        __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __snake_case = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )

    def a (self : Any ):
        """square_zero_matrix produces an all-zero square matrix."""
        self.assertEqual(
            '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' ,
            str(square_zero_matrix(5 ) ) ,
        )


if __name__ == "__main__":
    unittest.main()
592
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def __lowerCAmelCase ( __UpperCamelCase : int ): '''simple docstring''' if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) snake_case_ : str = precision snake_case_ : Any = ceil(precision / 1_4 ) snake_case_ : Dict = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt() snake_case_ : Optional[Any] = 1 snake_case_ : List[str] = 1_3_5_9_1_4_0_9 snake_case_ : Optional[int] = Decimal(__UpperCamelCase ) for k in range(1 , __UpperCamelCase ): snake_case_ : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3) linear_term += 5_4_5_1_4_0_1_3_4 exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": __lowerCAmelCase : int = 50 print(F'''The first {n} digits of pi is: {pi(n)}''')
58
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class a ( unittest.TestCase ): UpperCamelCase : Union[str, Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCamelCase : str = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : str = TextaTextGenerationPipeline(model=UpperCamelCase_ , tokenizer=UpperCamelCase_ ) return generator, ["Something to write", "Something else"] def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : List[Any] = generator('Something there' ) self.assertEqual(UpperCamelCase_ , [{'generated_text': ANY(UpperCamelCase_ )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) UpperCAmelCase__ : Tuple = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ [{'generated_text': ANY(UpperCamelCase_ )}, {'generated_text': ANY(UpperCamelCase_ )}], [{'generated_text': ANY(UpperCamelCase_ )}, {'generated_text': ANY(UpperCamelCase_ )}], ] , ) UpperCAmelCase__ : Optional[int] = generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ [{'generated_text': ANY(UpperCamelCase_ )}, {'generated_text': ANY(UpperCamelCase_ )}], [{'generated_text': ANY(UpperCamelCase_ )}, {'generated_text': ANY(UpperCamelCase_ )}], ] , ) with self.assertRaises(UpperCamelCase_ ): generator(4 ) @require_torch def __snake_case 
( self ): UpperCAmelCase__ : List[str] = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' ) # do_sample=False necessary for reproducibility UpperCAmelCase__ : str = generator('Something there' , do_sample=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , [{'generated_text': ''}] ) UpperCAmelCase__ : Any = 3 UpperCAmelCase__ : Optional[int] = generator( 'Something there' , num_return_sequences=UpperCamelCase_ , num_beams=UpperCamelCase_ , ) UpperCAmelCase__ : str = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase__ : Tuple = generator('This is a test' , do_sample=UpperCamelCase_ , num_return_sequences=2 , return_tensors=UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ] , ) UpperCAmelCase__ : str = generator.model.config.eos_token_id UpperCAmelCase__ : Dict = '<pad>' UpperCAmelCase__ : Union[str, Any] = generator( ['This is a test', 'This is a second test'] , do_sample=UpperCamelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase_ , ) self.assertEqual( UpperCamelCase_ , [ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ] , ) @require_tf def __snake_case ( self ): UpperCAmelCase__ : Any = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' ) # do_sample=False necessary for reproducibility UpperCAmelCase__ : List[Any] = generator('Something there' , do_sample=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , [{'generated_text': ''}] )
254
"""simple docstring""" import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a : def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=32 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=[10, 20, 30, 40] , UpperCamelCase_=[2, 2, 3, 2] , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=10 , UpperCamelCase_=0.02 , UpperCamelCase_=["stage2", "stage3", "stage4"] , UpperCamelCase_=[2, 3, 4] , UpperCamelCase_=None , ): UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Tuple = image_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : List[str] = num_stages UpperCAmelCase__ : Optional[int] = hidden_sizes UpperCAmelCase__ : int = depths UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Optional[int] = use_labels UpperCAmelCase__ : Union[str, Any] = intermediate_size UpperCAmelCase__ : List[str] = hidden_act UpperCAmelCase__ : int = num_labels UpperCAmelCase__ : int = initializer_range UpperCAmelCase__ : Optional[Any] = out_features UpperCAmelCase__ : Tuple = out_indices UpperCAmelCase__ : Dict = scope def __snake_case ( self ): UpperCAmelCase__ : List[Any] = 
floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Tuple = None if self.use_labels: UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase__ : List[str] = self.get_config() return config, pixel_values, labels def __snake_case ( self ): return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : Optional[Any] = ConvNextModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : int = model(UpperCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : str = ConvNextForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : List[str] = ConvNextBackbone(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : Optional[int] = model(UpperCamelCase_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Dict = ConvNextBackbone(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : Optional[Any] = model(UpperCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __snake_case ( self ): UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = config_and_inputs UpperCAmelCase__ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a ( lowercase , lowercase , unittest.TestCase ): UpperCamelCase : Optional[Any] = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) UpperCamelCase : Optional[int] = ( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) UpperCamelCase : str = True UpperCamelCase : Union[str, Any] = False UpperCamelCase : Any = False UpperCamelCase : Union[str, Any] = False UpperCamelCase : Optional[Any] = False def __snake_case ( self ): UpperCAmelCase__ : str = ConvNextModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def __snake_case ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __snake_case ( self ): return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def __snake_case ( self ): pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def __snake_case ( self ): pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def __snake_case ( self ): pass def __snake_case ( self ): UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : str = model_class(UpperCamelCase_ ) UpperCAmelCase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : List[str] = [*signature.parameters.keys()] UpperCAmelCase__ : Union[str, Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase_ ) def __snake_case ( self ): def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : List[Any] = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) UpperCAmelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase__ : Optional[Any] = self.model_tester.num_stages 
self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : List[Any] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : str = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def __snake_case ( self ): for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[Any] = ConvNextModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCamelCase ( ): UpperCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a ( unittest.TestCase ): @cached_property def __snake_case ( self ): return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def __snake_case ( self ): UpperCAmelCase__ : Optional[Any] = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase_ ) UpperCAmelCase__ : str = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : str = image_processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[Any] = model(**UpperCamelCase_ ) # verify the logits 
UpperCAmelCase__ : Any = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) ) @require_torch class a ( unittest.TestCase , lowercase ): UpperCamelCase : str = (ConvNextBackbone,) if is_torch_available() else () UpperCamelCase : List[str] = ConvNextConfig UpperCamelCase : Tuple = False def __snake_case ( self ): UpperCAmelCase__ : List[str] = ConvNextModelTester(self )
254
1
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__) if __name__ == "__main__": UpperCAmelCase_ : Any = argparse.ArgumentParser( description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)' ) parser.add_argument( '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.' ) parser.add_argument( '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.' ) parser.add_argument('--vocab_size', default=3_0522, type=int) UpperCAmelCase_ : Any = parser.parse_args() logger.info(F'Loading data from {args.data_file}') with open(args.data_file, 'rb') as fp: UpperCAmelCase_ : List[Any] = pickle.load(fp) logger.info('Counting occurrences for MLM.') UpperCAmelCase_ : List[str] = Counter() for tk_ids in data: counter.update(tk_ids) UpperCAmelCase_ : Dict = [0] * args.vocab_size for k, v in counter.items(): UpperCAmelCase_ : int = v logger.info(F'Dump to {args.token_counts_dump}') with open(args.token_counts_dump, 'wb') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
570
def SCREAMING_SNAKE_CASE_ ( __A : int = 2_00_00_00 ) -> int: """simple docstring""" a_ : Tuple = [0 for i in range(n + 1 )] a_ : Any = 1 a_ : Union[str, Any] = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , __A ): a_ : Tuple = 1 a_ : Union[str, Any] = 0 for i in range(__A ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F'{solution() = }')
570
1
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch ``state_dict``.

    Args:
        tf_checkpoint_path: path to the TF checkpoint to load weights from.
        rembert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to ``torch.save`` the converted state dict.

    The obfuscated original named this function ``a`` with three identically
    named parameters (a SyntaxError) while the __main__ guard called it by
    this name; parameter roles are restored from the argparse flags below.
    """
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('''Building PyTorch model from configuration: {}'''.format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    # NOTE(review): argument order (model, config, checkpoint) follows the
    # transformers load_tf_weights_in_* convention — confirm against upstream.
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--rembert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained RemBERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
89
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer


# NOTE(review): this is an obfuscated copy of transformers' Wav2Vec2Processor.
# Every local assignment below was collapsed to the single name
# SCREAMING_SNAKE_CASE_, so results are assigned to a throwaway local and the
# names later read (e.g. `kwargs`, `args`, `audio`, `inputs`, `model_inputs`)
# are undefined; __init__ also declares two parameters with the same name,
# which is a SyntaxError. The code is preserved byte-for-byte and only
# documented here — restore from upstream before use.
class __magic_name__ ( __UpperCAmelCase):
    """Processor wrapping a Wav2Vec2 feature extractor and a CTC tokenizer
    behind a single ``__call__``/``pad``/``decode`` interface."""

    # Class attributes consumed by ProcessorMixin to resolve sub-components.
    SCREAMING_SNAKE_CASE__ : List[str] = "Wav2Vec2FeatureExtractor"
    SCREAMING_SNAKE_CASE__ : List[Any] = "AutoTokenizer"

    def __init__( self: Tuple , _lowerCamelCase: str , _lowerCamelCase: Optional[Any] ):
        # NOTE(review): duplicate parameter names — SyntaxError as written;
        # presumably (feature_extractor, tokenizer). The two assignments below
        # presumably were self.current_processor / self._in_target_context_manager.
        super().__init__(_lowerCamelCase , _lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False

    @classmethod
    def _A ( cls: List[Any] , _lowerCamelCase: Tuple , **_lowerCamelCase: List[str] ):
        # from_pretrained with a legacy fallback: configs missing a
        # `tokenizer_class` are loaded piecewise with a deprecation warning.
        try:
            return super().from_pretrained(_lowerCamelCase , **_lowerCamelCase )
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''' , _lowerCamelCase , )
            SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
            SCREAMING_SNAKE_CASE_ = WavaVecaCTCTokenizer.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
            # NOTE(review): the two loads above are discarded into locals, so the
            # keyword arguments here are unresolved — broken by obfuscation.
            return cls(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )

    def __call__( self: Union[str, Any] , *_lowerCamelCase: List[Any] , **_lowerCamelCase: str ):
        # Dispatch audio to the feature extractor and/or text to the tokenizer.
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''raw_speech''' )
        else:
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''audio''' , _lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''sampling_rate''' , _lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''text''' , _lowerCamelCase )
        if len(_lowerCamelCase ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
        if text is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
        # Return audio features, text encodings, or features with labels attached.
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            SCREAMING_SNAKE_CASE_ = encodings['''input_ids''']
            return inputs

    def _A ( self: Optional[int] , *_lowerCamelCase: str , **_lowerCamelCase: List[Any] ):
        # pad(): mirrors __call__ but delegates padding of features and/or labels.
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*_lowerCamelCase , **_lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''input_features''' , _lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''labels''' , _lowerCamelCase )
        if len(_lowerCamelCase ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        if input_features is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
        if labels is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            SCREAMING_SNAKE_CASE_ = labels['''input_ids''']
            return input_features

    def _A ( self: str , *_lowerCamelCase: Dict , **_lowerCamelCase: Dict ):
        # batch_decode: forwarded verbatim to the tokenizer.
        return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )

    def _A ( self: Optional[int] , *_lowerCamelCase: Optional[int] , **_lowerCamelCase: Tuple ):
        # decode: forwarded verbatim to the tokenizer.
        return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )

    @contextmanager
    def _A ( self: Tuple ):
        # Deprecated as_target_processor(): temporarily swaps the active
        # sub-processor to the tokenizer for label encoding.
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = self.tokenizer
        yield
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False
89
1
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


# NOTE(review): obfuscated copy of transformers' generation stopping criteria.
# All five classes were renamed to `A__` (each shadowing the previous), the
# logger and the docstring constant both to `snake_case_`, and `__init__`
# parameter lists use duplicate `_a` names (a SyntaxError). Instance-state
# assignments are discarded into the local `_SCREAMING_SNAKE_CASE`. Preserved
# byte-for-byte; only comments added. Restore from upstream before use.
snake_case_ : Optional[Any] = logging.get_logger(__name__)

# Shared docstring injected into each __call__ via @add_start_docstrings.
snake_case_ : Any = R'''
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.
    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.
'''


class A__ ( UpperCamelCase__ ):
    """Abstract base criterion (presumably `StoppingCriteria`)."""

    @add_start_docstrings(_a )
    def __call__( self : List[Any] , _a : torch.LongTensor , _a : torch.FloatTensor , **_a : int ) -> bool:
        raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )


class A__ ( UpperCamelCase__ ):
    """Stop once the sequence reaches `max_length` (presumably `MaxLengthCriteria`)."""

    def __init__( self : List[str] , _a : int , _a : Optional[int] = None ) -> Any:
        # NOTE(review): assignments discarded into a local; presumably
        # self.max_length / self.max_position_embeddings.
        _SCREAMING_SNAKE_CASE =max_length
        _SCREAMING_SNAKE_CASE =max_position_embeddings

    @add_start_docstrings(_a )
    def __call__( self : str , _a : torch.LongTensor , _a : torch.FloatTensor , **_a : str ) -> bool:
        _SCREAMING_SNAKE_CASE =input_ids.shape[-1]
        _SCREAMING_SNAKE_CASE =cur_len >= self.max_length
        # Warn (once) when generation is about to exceed the model's position limit.
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                '''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                '''exceptions, performance degradation, or nothing at all.''' )
        return is_done


class A__ ( UpperCamelCase__ ):
    """Deprecated `MaxNewTokensCriteria`: stop after `start_length + max_new_tokens`."""

    def __init__( self : List[str] , _a : int , _a : int ) -> List[str]:
        warnings.warn(
            '''The class `MaxNewTokensCriteria` is deprecated. '''
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            '''with `max_length = start_length + max_new_tokens` instead.''' , _a , )
        _SCREAMING_SNAKE_CASE =start_length
        _SCREAMING_SNAKE_CASE =max_new_tokens
        _SCREAMING_SNAKE_CASE =start_length + max_new_tokens

    @add_start_docstrings(_a )
    def __call__( self : int , _a : torch.LongTensor , _a : torch.FloatTensor , **_a : List[str] ) -> bool:
        return input_ids.shape[-1] >= self.max_length


class A__ ( UpperCamelCase__ ):
    """Stop after `max_time` seconds of wall-clock time (presumably `MaxTimeCriteria`)."""

    def __init__( self : Optional[int] , _a : float , _a : Optional[float] = None ) -> Optional[Any]:
        _SCREAMING_SNAKE_CASE =max_time
        _SCREAMING_SNAKE_CASE =time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(_a )
    def __call__( self : Optional[int] , _a : torch.LongTensor , _a : torch.FloatTensor , **_a : Dict ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class A__ ( UpperCamelCase__ ):
    """List of criteria; stops when any member stops (presumably `StoppingCriteriaList`)."""

    @add_start_docstrings(_a )
    def __call__( self : List[Any] , _a : torch.LongTensor , _a : torch.FloatTensor , **_a : List[Any] ) -> bool:
        return any(criteria(_a , _a ) for criteria in self )

    @property
    def __UpperCamelCase ( self : int ) -> Optional[int]:
        # Return the max_length of the first length-based criterion, if any.
        for stopping_criterium in self:
            if isinstance(_a , _a ):
                return stopping_criterium.max_length
            elif isinstance(_a , _a ):
                return stopping_criterium.max_length
        return None


def lowerCamelCase( a__ ,a__):
    # Presumably validate_stopping_criteria(stopping_criteria, max_length):
    # reconciles a criteria list with an explicit max_length argument.
    # NOTE(review): duplicate `a__` parameters — SyntaxError as written.
    _SCREAMING_SNAKE_CASE =stopping_criteria.max_length
    _SCREAMING_SNAKE_CASE =deepcopy(a__)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' ,a__)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=a__))
    return new_stopping_criteria
691
def lowerCamelCase(a, n, mod):
    """Compute ``(a ** n) % mod`` by recursive binary exponentiation, O(log n).

    Args:
        a: base.
        n: non-negative integer exponent.
        mod: modulus (> 0).

    Returns:
        ``pow(a, n, mod)``.

    The obfuscated original declared three parameters all named ``a__`` (a
    SyntaxError) and recursed via the undefined name `binary_exponentiation`;
    it also halved with float division ``n / 2`` — fixed to ``n // 2`` so the
    exponent stays an int.
    """
    if n == 0:
        return 1
    elif n % 2 == 1:
        # Odd exponent: peel off one factor of a.
        return (lowerCamelCase(a, n - 1, mod) * a) % mod
    else:
        # Even exponent: square the half-power.
        half = lowerCamelCase(a, n // 2, mod)
        return (half * half) % mod


# Name used by the demo below (and by the original, pre-obfuscation, code).
binary_exponentiation = lowerCamelCase

if __name__ == "__main__":
    # a prime number
    p = 7_01
    a = 10_00_00_00_00
    b = 10
    # using binary exponentiation function, O(log(p)):
    print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
    print((a / b) % p == (a * b ** (p - 2)) % p)
691
1
"""simple docstring""" import re def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> str: '''simple docstring''' if len(re.findall('[ATCG]' , UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
192
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a : Optional[Any]= { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any]= [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[str]= [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any]= [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, 
WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys _a : Tuple= _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
192
1
"""simple docstring""" # Lint as: python3 import itertools import os import re A_ = re.compile(r"([A-Z]+)([A-Z][a-z])") A_ = re.compile(r"([a-z\d])([A-Z])") A_ = re.compile(r"(?<!_)_(?!_)") A_ = re.compile(r"(_{2,})") A_ = r"^\w+(\.\w+)*$" A_ = r"<>:/\|?*" def _UpperCamelCase ( A ): UpperCamelCase_ =_uppercase_uppercase_re.sub(R"\1_\2" , A ) UpperCamelCase_ =_lowercase_uppercase_re.sub(R"\1_\2" , A ) return name.lower() def _UpperCamelCase ( A ): UpperCamelCase_ =_single_underscore_re.split(A ) UpperCamelCase_ =[_multiple_underscores_re.split(A ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(A ) if n != "" ) def _UpperCamelCase ( A ): if os.path.basename(A ) != name: raise ValueError(f"""Should be a dataset name, not a path: {name}""" ) return camelcase_to_snakecase(A ) def _UpperCamelCase ( A , A ): if os.path.basename(A ) != name: raise ValueError(f"""Should be a dataset name, not a path: {name}""" ) if not re.match(_split_re , A ): raise ValueError(f"""Split name should match '{_split_re}'' but got '{split}'.""" ) return f"""{filename_prefix_for_name(A )}-{split}""" def _UpperCamelCase ( A , A , A , A=None ): UpperCamelCase_ =filename_prefix_for_split(A , A ) if filetype_suffix: prefix += f""".{filetype_suffix}""" UpperCamelCase_ =os.path.join(A , A ) return f"""{filepath}*""" def _UpperCamelCase ( A , A , A , A=None , A=None ): UpperCamelCase_ =filename_prefix_for_split(A , A ) UpperCamelCase_ =os.path.join(A , A ) if shard_lengths: UpperCamelCase_ =len(A ) UpperCamelCase_ =[f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(A )] if filetype_suffix: UpperCamelCase_ =[filename + f""".{filetype_suffix}""" for filename in filenames] return filenames else: UpperCamelCase_ =prefix if filetype_suffix: filename += f""".{filetype_suffix}""" return [filename]
391
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A_ = { "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"], "tokenization_ctrl": ["CTRLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
391
1
"""Reader that builds a `Dataset` from an in-memory PySpark DataFrame."""
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class __snake_case ( _SCREAMING_SNAKE_CASE):
    """Dataset reader backed by a PySpark DataFrame.

    The obfuscated original declared every ``__init__`` parameter with the
    same name (a SyntaxError) and discarded the instance state into locals;
    names are restored from the attributes ``read`` actually uses
    (``self.builder``, ``self._load_from_cache_file``, ``self._file_format``).
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        # Builder that materializes (or streams) the DataFrame as a dataset.
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return the dataset: streamed lazily, or downloaded/prepared on disk."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-build when the cache must not be reused.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
398
"""Sigmoid and SiLU/GELU-style activation functions on numpy arrays."""
import numpy as np


def sigmoid(vector: np.array) -> np.array:
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    """Elementwise x * sigmoid(1.702 * x) (the sigmoid GELU approximation).

    The obfuscated original named both functions ``UpperCamelCase_`` (the
    second shadowing the first) while this body called the then-undefined
    name ``sigmoid`` — restored here.
    """
    return vector * sigmoid(1.702 * vector)


# Preserve the name the obfuscated module ultimately exported (the second,
# shadowing definition).
UpperCamelCase_ = sigmoid_linear_unit

if __name__ == "__main__":
    import doctest

    doctest.testmod()
398
1
def UpperCAmelCase_ ( _A = 1_00_00_00 ):
    """Return the sum of Euler's totient phi(k) for 2 <= k <= ``_A``.

    (Project Euler 72: the number of reduced proper fractions with
    denominator <= limit.) Uses a sieve: phi[k] starts at k-1; for each
    prime p, every multiple m has phi[m] reduced by phi[m] // p.

    The obfuscated original referenced the undefined names ``limit`` (its
    parameter is ``_A``), ``__UpperCamelCase`` (the sieve step, i.e. ``i``)
    and ``solution`` (this function) — all restored here.
    """
    phi = [i - 1 for i in range(_A + 1)]
    for i in range(2, _A + 1):
        # phi[i] == i - 1 iff i is prime (it has not been reduced yet).
        if phi[i] == i - 1:
            for j in range(2 * i, _A + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : _A + 1])


# Name used by the __main__ guard (and the pre-obfuscation code).
solution = UpperCAmelCase_

if __name__ == "__main__":
    print(solution())
493
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch

A__ : List[str] = logging.get_logger(__name__)


# NOTE(review): obfuscated copy of transformers' FillMaskPipeline. All method
# names were collapsed to `UpperCAmelCase__`, every local assignment to
# `lowerCAmelCase_` (so later reads like `masked_index`, `model_inputs`,
# `outputs`, `targets` are undefined), and several signatures declare
# duplicate `A_` parameters (a SyntaxError). Preserved byte-for-byte; only
# comments added. Restore from upstream before use.
@add_end_docstrings(
    UpperCamelCase_ ,R'''
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    ''' ,)
class __snake_case ( UpperCamelCase_ ):
    """Masked-language-modeling pipeline: predicts the token(s) behind
    the tokenizer's mask token."""

    def UpperCAmelCase__ ( self : Optional[Any] , A_ : GenericTensor):
        # Presumably get_masked_index: positions of the mask token per framework.
        if self.framework == "tf":
            lowerCAmelCase_ : Dict = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            lowerCAmelCase_ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A_)
        else:
            raise ValueError('''Unsupported framework''')
        return masked_index

    def UpperCAmelCase__ ( self : Tuple , A_ : GenericTensor):
        # Presumably _ensure_exactly_one_mask_token: raise if no mask is present.
        lowerCAmelCase_ : List[str] = self.get_masked_index(A_)
        lowerCAmelCase_ : Union[str, Any] = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )

    def UpperCAmelCase__ ( self : str , A_ : GenericTensor):
        # Presumably ensure_exactly_one_mask_token: batched wrapper of the check.
        if isinstance(A_ , A_):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(A_)

    def UpperCAmelCase__ ( self : Optional[Any] , A_ : Union[str, Any] , A_ : Optional[int]=None , **A_ : List[str]):
        # Presumably preprocess: tokenize and validate the mask token.
        if return_tensors is None:
            lowerCAmelCase_ : Optional[int] = self.framework
        lowerCAmelCase_ : Optional[Any] = self.tokenizer(A_ , return_tensors=A_)
        self.ensure_exactly_one_mask_token(A_)
        return model_inputs

    def UpperCAmelCase__ ( self : List[str] , A_ : str):
        # Presumably _forward: run the model, keeping input_ids for postprocess.
        lowerCAmelCase_ : Union[str, Any] = self.model(**A_)
        lowerCAmelCase_ : List[str] = model_inputs['''input_ids''']
        return model_outputs

    def UpperCAmelCase__ ( self : str , A_ : str , A_ : str=5 , A_ : int=None):
        # Presumably postprocess(model_outputs, top_k=5, target_ids=None).
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            lowerCAmelCase_ : int = target_ids.shape[0]
        lowerCAmelCase_ : List[Any] = model_outputs['''input_ids'''][0]
        lowerCAmelCase_ : int = model_outputs['''logits''']
        if self.framework == "tf":
            lowerCAmelCase_ : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            lowerCAmelCase_ : Optional[Any] = outputs.numpy()
            lowerCAmelCase_ : List[str] = outputs[0, masked_index, :]
            lowerCAmelCase_ : List[Any] = stable_softmax(A_ , axis=-1)
            if target_ids is not None:
                lowerCAmelCase_ : str = tf.gather_nd(tf.squeeze(A_ , 0) , target_ids.reshape(-1 , 1))
                lowerCAmelCase_ : Any = tf.expand_dims(A_ , 0)
            lowerCAmelCase_ : List[Any] = tf.math.top_k(A_ , k=A_)
            lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy()
        else:
            lowerCAmelCase_ : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A_).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            lowerCAmelCase_ : Dict = outputs[0, masked_index, :]
            lowerCAmelCase_ : Dict = logits.softmax(dim=-1)
            if target_ids is not None:
                lowerCAmelCase_ : str = probs[..., target_ids]
            lowerCAmelCase_ , lowerCAmelCase_ : int = probs.topk(A_)
        lowerCAmelCase_ : Union[str, Any] = []
        lowerCAmelCase_ : Optional[int] = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
            lowerCAmelCase_ : int = []
            for v, p in zip(_values , _predictions):
                # Copy is important since we're going to modify this array in place
                lowerCAmelCase_ : Dict = input_ids.numpy().copy()
                if target_ids is not None:
                    lowerCAmelCase_ : str = target_ids[p].tolist()
                lowerCAmelCase_ : List[Any] = p
                # Filter padding out:
                lowerCAmelCase_ : Tuple = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                lowerCAmelCase_ : Any = self.tokenizer.decode(A_ , skip_special_tokens=A_)
                lowerCAmelCase_ : str = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p]), '''sequence''': sequence}
                row.append(A_)
            result.append(A_)
        if single_mask:
            return result[0]
        return result

    def UpperCAmelCase__ ( self : int , A_ : Any , A_ : List[Any]=None):
        # Presumably get_target_ids(targets, top_k): resolve target strings to
        # vocabulary ids, tokenizing (with a warning) when absent from vocab.
        if isinstance(A_ , A_):
            lowerCAmelCase_ : List[str] = [targets]
        try:
            lowerCAmelCase_ : Union[str, Any] = self.tokenizer.get_vocab()
        except Exception:
            lowerCAmelCase_ : str = {}
        lowerCAmelCase_ : Any = []
        for target in targets:
            lowerCAmelCase_ : List[str] = vocab.get(A_ , A_)
            if id_ is None:
                lowerCAmelCase_ : Optional[int] = self.tokenizer(
                    A_ , add_special_tokens=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , max_length=1 , truncation=A_ , )['''input_ids''']
                if len(A_) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        '''We cannot replace it with anything meaningful, ignoring it''')
                    continue
                lowerCAmelCase_ : Union[str, Any] = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""")
            target_ids.append(id_)
        lowerCAmelCase_ : List[str] = list(set(A_))
        if len(A_) == 0:
            raise ValueError('''At least one target must be provided when passed.''')
        lowerCAmelCase_ : Tuple = np.array(A_)
        return target_ids

    def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[int]=None , A_ : Tuple=None):
        # Presumably _sanitize_parameters(top_k, targets): build postprocess kwargs.
        lowerCAmelCase_ : int = {}
        if targets is not None:
            lowerCAmelCase_ : Optional[Any] = self.get_target_ids(A_ , A_)
            lowerCAmelCase_ : str = target_ids
        if top_k is not None:
            lowerCAmelCase_ : int = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''')
        return {}, {}, postprocess_params

    def __call__( self : str , A_ : Tuple , *A_ : Dict , **A_ : Optional[Any]):
        # Unwrap single-input results so a lone string returns a flat list.
        lowerCAmelCase_ : Tuple = super().__call__(A_ , **A_)
        if isinstance(A_ , A_) and len(A_) == 1:
            return outputs[0]
        return outputs
171
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


# NOTE(review): obfuscated copy of the MobileViT image-processor tests. Both
# classes are named `lowerCamelCase_` (the second shadows the first), every
# `__init__` parameter is `__lowercase` (duplicate names — a SyntaxError) and
# every assignment is discarded into `__UpperCamelCase`, so attributes like
# self.size / self.do_resize are never actually set. Preserved byte-for-byte;
# only comments added. Restore from upstream before use.
class lowerCamelCase_ ( unittest.TestCase ):
    """Config holder (presumably `MobileViTImageProcessingTester`) that
    supplies the kwargs used to build the image processor under test."""

    def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=18 , __lowercase=30 , __lowercase=400 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=None , __lowercase=True , ) -> Dict:
        # NOTE(review): presumably (parent, batch_size, num_channels, image_size,
        # min_resolution, max_resolution, do_resize, size, do_center_crop,
        # crop_size, do_flip_channel_order) — names below were the intended
        # instance attributes.
        __UpperCamelCase :str = size if size is not None else {"shortest_edge": 20}
        __UpperCamelCase :str = crop_size if crop_size is not None else {"height": 18, "width": 18}
        __UpperCamelCase :Any = parent
        __UpperCamelCase :Optional[int] = batch_size
        __UpperCamelCase :Optional[Any] = num_channels
        __UpperCamelCase :str = image_size
        __UpperCamelCase :Optional[int] = min_resolution
        __UpperCamelCase :Tuple = max_resolution
        __UpperCamelCase :Dict = do_resize
        __UpperCamelCase :Optional[Any] = size
        __UpperCamelCase :Optional[int] = do_center_crop
        __UpperCamelCase :Tuple = crop_size
        __UpperCamelCase :Optional[int] = do_flip_channel_order

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        # Presumably prepare_image_processor_dict: kwargs for the processor.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class lowerCamelCase_ ( _UpperCamelCase , unittest.TestCase ):
    """Test case (presumably `MobileViTImageProcessingTest`) exercising the
    MobileViT image processor on PIL, numpy and torch inputs."""

    # Processor class under test; None when vision deps are unavailable.
    a__ : Optional[int] = MobileViTImageProcessor if is_vision_available() else None

    def UpperCamelCase__ ( self) -> Tuple:
        # Presumably setUp: build the shared tester fixture.
        __UpperCamelCase :int = MobileViTImageProcessingTester(self)

    @property
    def UpperCamelCase__ ( self) -> Optional[Any]:
        # Presumably image_processor_dict property.
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__ ( self) -> Optional[int]:
        # Processor exposes the expected configuration attributes.
        __UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__lowercase , '''do_resize'''))
        self.assertTrue(hasattr(__lowercase , '''size'''))
        self.assertTrue(hasattr(__lowercase , '''do_center_crop'''))
        self.assertTrue(hasattr(__lowercase , '''center_crop'''))
        self.assertTrue(hasattr(__lowercase , '''do_flip_channel_order'''))

    def UpperCamelCase__ ( self) -> List[Any]:
        # from_dict honours defaults and keyword overrides for size/crop_size.
        __UpperCamelCase :List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20})
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
        __UpperCamelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})

    def UpperCamelCase__ ( self) -> Dict:
        # Intentionally empty placeholder in the upstream test suite.
        pass

    def UpperCamelCase__ ( self) -> int:
        # Initialize image_processing
        __UpperCamelCase :Optional[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __UpperCamelCase :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase)
        for image in image_inputs:
            self.assertIsInstance(__lowercase , Image.Image)
        # Test not batched input
        __UpperCamelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __UpperCamelCase :Tuple = image_processing(__lowercase , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCamelCase__ ( self) -> List[Any]:
        # Initialize image_processing
        __UpperCamelCase :Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __UpperCamelCase :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase)
        for image in image_inputs:
            self.assertIsInstance(__lowercase , np.ndarray)
        # Test not batched input
        __UpperCamelCase :Any = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __UpperCamelCase :Dict = image_processing(__lowercase , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCamelCase__ ( self) -> Tuple:
        # Initialize image_processing
        __UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __UpperCamelCase :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase)
        for image in image_inputs:
            self.assertIsInstance(__lowercase , torch.Tensor)
        # Test not batched input
        __UpperCamelCase :int = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __UpperCamelCase :Tuple = image_processing(__lowercase , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
715
# Streamlit demo: Long Form Question Answering with ELI5.
# Pipeline: embed the user question -> retrieve Wikipedia passages (dense FAISS
# index or sparse ElasticSearch) -> generate a long-form answer with a BART/T5
# seq2seq model -> render everything in a Streamlit UI.
#
# NOTE(review): identifiers in this file look machine-mangled — every function is
# named ``lowerCamelCase`` (later defs shadow earlier ones), parameters are all
# ``SCREAMING_SNAKE_CASE``, and assignment targets (``__lowercase``,
# ``__UpperCamelCase``) never match the names read afterwards (``qar_model``,
# ``description``, ``action_list``...). The file cannot run as-is; the comments
# below describe the evident intent of each section.
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
    embed_questions_for_retrieval,
    make_qa_sas_model,
    qa_sas_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer

# Global configuration: generator family ("bart" branch below) and whether to
# load the GPU-hungry dense retrieval index.
__lowercase = '''bart'''
__lowercase = True


@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def lowerCamelCase ( ):
    '''Load the dense question encoder (RetriBERT) and the seq2seq answer
    generator (BART, or T5 fallback), cached across Streamlit reruns.'''
    if LOAD_DENSE_INDEX:
        # RetriBERT question encoder for dense retrieval, eval mode on GPU 0.
        __UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        __UpperCamelCase :Union[str, Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        __UpperCamelCase :Optional[int] = qar_model.eval()
    else:
        __UpperCamelCase , __UpperCamelCase :Optional[Any] = (None, None)
    if MODEL_TYPE == "bart":
        # BART generator fine-tuned on ELI5, with extra weights restored from a
        # local checkpoint file.
        __UpperCamelCase :str = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        __UpperCamelCase :List[Any] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        __UpperCamelCase :Any = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        __UpperCamelCase :List[str] = sas_model.eval()
    else:
        # Fallback: a small T5 generator restored from a local checkpoint.
        __UpperCamelCase , __UpperCamelCase :Tuple = make_qa_sas_model(
            model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def lowerCamelCase ( ):
    '''Load the Wiki40b passages, the (optional) GPU FAISS inner-product index
    over their 128-d embeddings, and an ElasticSearch client.'''
    if LOAD_DENSE_INDEX:
        __UpperCamelCase :int = faiss.StandardGpuResources()
        __UpperCamelCase :List[Any] = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
        # Pre-computed passage embeddings, memory-mapped: one 128-d float32
        # vector per passage row.
        __UpperCamelCase :List[str] = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
        __UpperCamelCase :Tuple = faiss.IndexFlatIP(128 )
        __UpperCamelCase :str = faiss.index_cpu_to_gpu(SCREAMING_SNAKE_CASE , 1 , SCREAMING_SNAKE_CASE )
        wikiaab_gpu_index_flat.add(SCREAMING_SNAKE_CASE )  # TODO fix for larger GPU
    else:
        __UpperCamelCase , __UpperCamelCase :Any = (None, None)
    __UpperCamelCase :Optional[Any] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def lowerCamelCase ( ):
    '''Load the ELI5 training split and a FAISS index over its pre-computed
    question embeddings (used to find the most similar training question).'''
    __UpperCamelCase :Optional[Any] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
    __UpperCamelCase :int = elia['''train_eli5''']
    __UpperCamelCase :List[str] = np.memmap(
        '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
    __UpperCamelCase :Optional[Any] = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(SCREAMING_SNAKE_CASE )
    return (elia_train, eli5_train_q_index)


# Module-level initialization: build indexes, models and training data once.
__lowercase , __lowercase , __lowercase = load_indexes()
__lowercase , __lowercase , __lowercase , __lowercase = load_models()
__lowercase , __lowercase = load_train_data()


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=10 ):
    '''Return the top ELI5 training examples whose questions are nearest (by
    inner product) to the given question.'''
    __UpperCamelCase :str = embed_questions_for_retrieval([question] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    __UpperCamelCase , __UpperCamelCase :Union[str, Any] = eli5_train_q_index.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Optional[Any] = [elia_train[int(SCREAMING_SNAKE_CASE )] for i in I[0]]
    return nn_examples


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="wiki40b" , SCREAMING_SNAKE_CASE="dense" , SCREAMING_SNAKE_CASE=10 ):
    '''Retrieve supporting passages for a question (dense FAISS or sparse ES;
    empty placeholder when source == "none") and build the seq2seq input
    string "question: ... context: ...".'''
    if source == "none":
        __UpperCamelCase , __UpperCamelCase :Optional[int] = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            __UpperCamelCase , __UpperCamelCase :List[str] = query_qa_dense_index(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        else:
            __UpperCamelCase , __UpperCamelCase :Optional[int] = query_es_index(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index_name='''english_wiki40b_snippets_100w''' , n_results=SCREAMING_SNAKE_CASE , )
    # (article title, section title, score, passage text) per hit.
    __UpperCamelCase :int = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text'''])
        for res in hit_lst
    ]
    __UpperCamelCase :int = '''question: {} context: {}'''.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    return question_doc, support_list


# hash_funcs map unhashable arguments (tensors, tokenizers) to None so
# Streamlit's cache does not try to hash them.
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda SCREAMING_SNAKE_CASE : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE : None),
    } )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.95 , SCREAMING_SNAKE_CASE=0.8 ):
    '''Generate a single long-form answer for the question+context document
    with the seq2seq model (beam search or sampling).'''
    with torch.no_grad():
        __UpperCamelCase :Union[str, Any] = qa_sas_generate(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , temp=SCREAMING_SNAKE_CASE , top_p=SCREAMING_SNAKE_CASE , top_k=SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='''cuda:0''' , )[0]
    return (answer, support_list)


st.title('''Long Form Question Answering with ELI5''')

# Start sidebar
__lowercase = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__lowercase = ''' <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> ''' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
__lowercase = ''' This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. '''
st.sidebar.markdown(description, unsafe_allow_html=True)

# Sidebar: demo display options (what to show for the asked question).
__lowercase = [
    '''Answer the question''',
    '''View the retrieved document only''',
    '''View the most similar ELI5 question and answer''',
    '''Show me everything, please!''',
]
__lowercase = st.sidebar.checkbox('''Demo options''')
if demo_options:
    __lowercase = st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    __lowercase = action_list.index(action_st)
    __lowercase = st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    __lowercase = show_type == '''Show full text of passages'''
else:
    # Defaults when the options panel is collapsed: show everything, full text.
    __lowercase = 3
    __lowercase = True

# Sidebar: retrieval options (wiki source, dense/sparse/mixed index).
__lowercase = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    __lowercase = ''' ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. '''
    st.sidebar.markdown(retriever_info)
    __lowercase = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    __lowercase = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    __lowercase = '''wiki40b'''
    __lowercase = '''dense'''

# Sidebar: generation options (decoding strategy and length bounds).
__lowercase = '''beam'''
__lowercase = 2
__lowercase = 64
__lowercase = 256
__lowercase = None
__lowercase = None
__lowercase = st.sidebar.checkbox('''Generation options''')
if generate_options:
    __lowercase = ''' ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder\'s output probabilities. '''
    st.sidebar.markdown(generate_info)
    __lowercase = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    __lowercase = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    __lowercase = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        __lowercase = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        __lowercase = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
        )
        __lowercase = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
        )
        __lowercase = None

# start main text
__lowercase = [
    '''<MY QUESTION>''',
    '''How do people make chocolate?''',
    '''Why do we get a fever when we are sick?''',
    '''How can different animals perceive different colors?''',
    '''What is natural language processing?''',
    '''What\'s the best way to treat a sunburn?''',
    '''What exactly are vitamins ?''',
    '''How does nuclear energy provide electricity?''',
    '''What\'s the difference between viruses and bacteria?''',
    '''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
    '''Why do people like drinking coffee even though it tastes so bad?''',
    '''What happens when wine ages? How does it make the wine taste better?''',
    '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
    '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
    '''How does New Zealand have so many large bird predators?''',
]
__lowercase = st.selectbox(
    '''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    __lowercase = st.text_input('''Enter your question here:''', '''''')
else:
    __lowercase = question_s

if st.button('''Show me!'''):
    # actions 0/1/3 need retrieved support passages.
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, de-duplicated, keep the top 10.
            __lowercase , __lowercase = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            __lowercase , __lowercase = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            __lowercase = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            __lowercase = support_list[:10]
            __lowercase = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
        else:
            __lowercase , __lowercase = make_support(question, source=wiki_source, method=index_type, n_results=10)
    # actions 0/3 generate the model's answer.
    if action in [0, 3]:
        __lowercase , __lowercase = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == '''sampled'''),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown('''### The model generated answer
is:''')
        st.write(answer)
    # actions 0/1/3 display the supporting Wikipedia passages with links.
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
        for i, res in enumerate(support_list):
            __lowercase = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            __lowercase = res[1].strip()
            if sec_titles == "":
                __lowercase = '''[{}]({})'''.format(res[0], wiki_url)
            else:
                __lowercase = sec_titles.split(''' & ''')
                __lowercase = ''' & '''.join(
                    ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
                )
            st.markdown(
                '''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
                )
    # actions 2/3 show the most similar ELI5 training question with its answers.
    if action in [2, 3]:
        __lowercase = find_nearest_training(question)
        __lowercase = nn_train_list[0]
        st.markdown(
            '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
        )
        __lowercase = [
            '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
            for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
            if i == 0 or sc > 2
        ]
        st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))

__lowercase = ''' --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* '''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
452
0
# Logistic Regression from scratch
#
# Fixes over the previous revision: the four helpers were all defined under the
# same mangled name (shadowing each other), used duplicate parameter names
# (a SyntaxError), and their bodies referenced ``sigmoid_function`` /
# ``cost_function`` / ``logistic_reg`` / ``predict_prob`` — names that were
# never defined. The definitions below restore the names the call sites use.
# The decision-boundary plot also used invalid annotated tuple assignments
# (``((a), (b)) :int = ...``); those are now plain tuple unpacking.

# importing all the required libraries
import numpy as np


def sigmoid_function(z):
    """Logistic (sigmoid) activation: maps any real-valued ``z`` into (0, 1)."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Mean binary cross-entropy between predicted probabilities ``h`` and
    binary labels ``y`` (both array-like of the same shape)."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of labels ``y`` under a logistic model with ``weights``
    applied to feature matrix ``x``."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit logistic-regression weights by batch gradient descent.

    Args:
        alpha: learning rate.
        x: feature matrix of shape (n_samples, n_features).
        y: binary (0/1) label vector of shape (n_samples,).
        max_iterations: number of gradient-descent steps.

    Returns:
        The learned weight vector ``theta`` of shape (n_features,).
    """
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        if iterations % 100 == 0:
            # Evaluate the loss only when it is reported (the old code
            # recomputed z/h and the loss on every single iteration).
            z = np.dot(x, theta)
            h = sigmoid_function(z)
            j = cost_function(h, y)
            print(F'''loss: {j} \t''' )  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    # Demo-only heavy dependencies are imported lazily so that importing this
    # module for its functions does not require sklearn/matplotlib.
    from matplotlib import pyplot as plt
    from sklearn import datasets

    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print('theta: ', theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        """Probability of class 1 from the fitted logistic regression."""
        return sigmoid_function(np.dot(x, theta))

    # Plot the two classes and the p=0.5 decision boundary over a grid.
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
106
"""Convert DiT checkpoints (from the unilm repository) into HuggingFace BEiT
models — ``BeitForMaskedImageModeling`` for pretraining checkpoints, or
``BeitForImageClassification`` for RVL-CDIP fine-tuned ones — verify the
logits on a test image, save locally, and optionally push to the Hub.

NOTE(review): identifiers look machine-mangled — every helper is named
``SCREAMING_SNAKE_CASE__`` (later defs shadow earlier ones) and bodies read
names (``is_semantic``, ``config``, ``dct``, ``checkpoint_url``...) that do not
match the parameter names, so the file cannot run as-is; the comments below
describe the evident intent.
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)


# create_rename_keys: build (old key, new key) pairs mapping original DiT
# checkpoint parameter names to HuggingFace BEiT parameter names.
def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : Any=False , snake_case : Any=False )-> str:
    '''Build the list of (source_key, target_key) renames for one checkpoint:
    per-layer encoder weights, embeddings, and either the LM head (mask token +
    final layernorm) or the classification head.'''
    UpperCAmelCase__ : Dict = "backbone." if is_semantic else ""
    UpperCAmelCase__ : Any = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
        rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
        rename_keys.append(
            (f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
        rename_keys.append(
            (f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
        rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
        rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
        rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
        rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
        rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
        rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f'{prefix}cls_token', "beit.embeddings.cls_token"),
            (f'{prefix}patch_embed.proj.weight', "beit.embeddings.patch_embeddings.projection.weight"),
            (f'{prefix}patch_embed.proj.bias', "beit.embeddings.patch_embeddings.projection.bias"),
            (f'{prefix}pos_embed', "beit.embeddings.position_embeddings"),
        ] )
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ] )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys


# read_in_q_k_v: the original checkpoint stores Q/K/V as one fused ``qkv``
# matrix; split it into the separate query/key/value weights BEiT expects.
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : List[str] , snake_case : List[Any]=False , snake_case : int=False )-> Any:
    '''Split each layer's fused qkv projection (and q/v biases) into the
    per-head query/key/value parameters, and move gamma_1/gamma_2 scales.'''
    for i in range(config.num_hidden_layers ):
        UpperCAmelCase__ : Dict = "backbone." if is_semantic else ""
        # queries, keys and values
        UpperCAmelCase__ : List[str] = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
        UpperCAmelCase__ : int = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
        UpperCAmelCase__ : Optional[int] = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
        # Fused weight layout is [query; key; value] along dim 0.
        UpperCAmelCase__ : List[Any] = in_proj_weight[
            : config.hidden_size, :
        ]
        UpperCAmelCase__ : Dict = q_bias
        UpperCAmelCase__ : Optional[int] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        UpperCAmelCase__ : Any = in_proj_weight[
            -config.hidden_size :, :
        ]
        UpperCAmelCase__ : int = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        UpperCAmelCase__ : int = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
        UpperCAmelCase__ : Tuple = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
        UpperCAmelCase__ : Dict = gamma_a
        UpperCAmelCase__ : Tuple = gamma_a


# rename_key: move one entry of a state dict from its old key to a new key.
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Dict , snake_case : Tuple )-> Any:
    '''Pop ``dct[old_key]`` and re-insert its value under the new key.'''
    UpperCAmelCase__ : str = dct.pop(snake_case )
    UpperCAmelCase__ : Tuple = val


# prepare_img: standard COCO test image used to sanity-check model outputs.
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
    '''Download and return the standard COCO val2017 test image.'''
    UpperCAmelCase__ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case , stream=snake_case ).raw )
    return im


# convert_dit_checkpoint: full conversion pipeline for one checkpoint URL.
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : str , snake_case : Optional[Any]=False )-> str:
    '''Download a DiT checkpoint, rename/split its weights into a BEiT model,
    verify the output logits shape on a test image, save the model and image
    processor to ``pytorch_dump_folder_path``, and optionally push to the Hub.'''
    # Checkpoints without "rvlcdip" in the URL are pretraining checkpoints and
    # therefore carry the masked-image-modeling (LM) head.
    UpperCAmelCase__ : Optional[Any] = False if "rvlcdip" in checkpoint_url else True
    UpperCAmelCase__ : int = BeitConfig(use_absolute_position_embeddings=snake_case , use_mask_token=snake_case )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        UpperCAmelCase__ : Optional[int] = 1024
        UpperCAmelCase__ : Optional[Any] = 4096
        UpperCAmelCase__ : List[str] = 24
        UpperCAmelCase__ : List[str] = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        # RVL-CDIP document classification: 16 classes, labels fetched from the Hub.
        UpperCAmelCase__ : List[Any] = 16
        UpperCAmelCase__ : Optional[int] = "huggingface/label-files"
        UpperCAmelCase__ : List[Any] = "rvlcdip-id2label.json"
        UpperCAmelCase__ : List[str] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
        UpperCAmelCase__ : Dict = {int(snake_case ): v for k, v in idalabel.items()}
        UpperCAmelCase__ : List[Any] = idalabel
        UpperCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    UpperCAmelCase__ : List[Any] = torch.hub.load_state_dict_from_url(snake_case , map_location="cpu" )["model"]
    UpperCAmelCase__ : str = create_rename_keys(snake_case , has_lm_head=snake_case )
    for src, dest in rename_keys:
        rename_key(snake_case , snake_case , snake_case )
    read_in_q_k_v(snake_case , snake_case , has_lm_head=snake_case )
    # load HuggingFace model
    UpperCAmelCase__ : int = BeitForMaskedImageModeling(snake_case ) if has_lm_head else BeitForImageClassification(snake_case )
    model.eval()
    model.load_state_dict(snake_case )
    # Check outputs on an image
    UpperCAmelCase__ : List[Any] = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=snake_case )
    UpperCAmelCase__ : Any = prepare_img()
    UpperCAmelCase__ : Any = image_processor(images=snake_case , return_tensors="pt" )
    UpperCAmelCase__ : List[str] = encoding["pixel_values"]
    UpperCAmelCase__ : Optional[Any] = model(snake_case )
    UpperCAmelCase__ : Optional[Any] = outputs.logits
    # verify logits
    # classification head emits (1, 16); LM head emits (1, 196, 8192) over patches.
    UpperCAmelCase__ : Union[str, Any] = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(snake_case ), "Shape of logits not as expected"
    Path(snake_case ).mkdir(exist_ok=snake_case )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(snake_case )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(snake_case )
    if push_to_hub:
        if has_lm_head:
            UpperCAmelCase__ : Any = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            UpperCAmelCase__ : int = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(snake_case , snake_case ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=snake_case , )
        model.push_to_hub(
            repo_path_or_name=Path(snake_case , snake_case ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=snake_case , )


if __name__ == "__main__":
    _lowerCAmelCase : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    _lowerCAmelCase : str = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
438
0
'''Tests for the PyTorch MobileNetV1 model: config checks, shape checks on a
tiny randomly-initialized model, common ModelTesterMixin tests, and a slow
integration test against the pretrained google/mobilenet_v1_1.0_224 checkpoint.

NOTE(review): class/method names look machine-mangled (every class is
``_lowerCAmelCase``, every method ``__A``, base classes ``UpperCamelCase_``),
so later definitions shadow earlier ones and the file cannot run as-is; the
docstrings below describe the evident intent of each piece.
'''
import inspect
import unittest

from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetVaForImageClassification, MobileNetVaModel
    from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class _lowerCAmelCase ( UpperCamelCase_ ):
    """Config tester: MobileNetV1-specific additions to the common config tests."""

    def __A ( self : List[Any] ) -> int:
        """Check the config exposes the MobileNetV1 attributes ``tf_padding``
        and ``depth_multiplier``."""
        lowerCAmelCase = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "tf_padding" ) )
        self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "depth_multiplier" ) )


class _lowerCAmelCase :
    """Model tester: builds a tiny config plus random inputs and asserts the
    output shapes of the base model and the classification head."""

    def __init__( self : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=1_3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Any=3_2 , SCREAMING_SNAKE_CASE : Optional[Any]=0.2_5 , SCREAMING_SNAKE_CASE : int=8 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Optional[Any]=1_0_2_4 , SCREAMING_SNAKE_CASE : str=3_2 , SCREAMING_SNAKE_CASE : List[str]="relu6" , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Any=0.0_2 , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Dict=1_0 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ) -> List[str]:
        # Store the small-model hyperparameters used by every check below.
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = num_channels
        lowerCAmelCase = image_size
        lowerCAmelCase = depth_multiplier
        lowerCAmelCase = min_depth
        lowerCAmelCase = tf_padding
        # Final channel count scales with the depth multiplier.
        lowerCAmelCase = int(last_hidden_size * depth_multiplier )
        lowerCAmelCase = output_stride
        lowerCAmelCase = hidden_act
        lowerCAmelCase = classifier_dropout_prob
        lowerCAmelCase = use_labels
        lowerCAmelCase = is_training
        lowerCAmelCase = num_labels
        lowerCAmelCase = initializer_range
        lowerCAmelCase = scope

    def __A ( self : Any ) -> int:
        """Create random pixel values (and labels when enabled) plus a config."""
        lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase = None
        lowerCAmelCase = None
        if self.use_labels:
            lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
            lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCAmelCase = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def __A ( self : str ) -> Optional[Any]:
        """Build a MobileNetV1 config from the tester's hyperparameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def __A ( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str ) -> int:
        """Base model: last hidden state must be (batch, channels, H/stride, W/stride)."""
        lowerCAmelCase = MobileNetVaModel(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def __A ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
        """Classification head: logits must be (batch, num_labels)."""
        lowerCAmelCase = self.num_labels
        lowerCAmelCase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __A ( self : Any ) -> Any:
        """Repackage the prepared inputs into the (config, inputs_dict) form
        expected by the common ModelTesterMixin machinery."""
        lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
        lowerCAmelCase = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Common model tests for MobileNetV1 via ModelTesterMixin/PipelineTesterMixin.

    MobileNetV1 has no input embeddings, no output embeddings and no attention
    layers, so the corresponding common tests are skipped below.
    """

    lowerCAmelCase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    lowerCAmelCase = (
        {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowerCAmelCase = False
    lowerCAmelCase = False
    lowerCAmelCase = False
    lowerCAmelCase = False

    def __A ( self : Dict ) -> int:
        """Wire up the model tester and config tester used by every test."""
        lowerCAmelCase = MobileNetVaModelTester(self )
        lowerCAmelCase = MobileNetVaConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )

    def __A ( self : Tuple ) -> Optional[Any]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
    def __A ( self : List[Any] ) -> int:
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
    def __A ( self : Union[str, Any] ) -> int:
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions" )
    def __A ( self : Tuple ) -> Dict:
        pass

    def __A ( self : str ) -> Union[str, Any]:
        """forward() of every model class must take ``pixel_values`` first."""
        lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
            lowerCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase = [*signature.parameters.keys()]
            lowerCAmelCase = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )

    def __A ( self : int ) -> str:
        """Shape check for the base model."""
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )

    def __A ( self : Any ) -> int:
        """hidden_states output must contain 26 tensors, whether requested via
        the forward kwarg or via the config flag."""
        def check_hidden_states_output(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ):
            lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            lowerCAmelCase = outputs.hidden_states
            # MobileNetV1 exposes 26 intermediate feature maps.
            lowerCAmelCase = 2_6
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )

        lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase = True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase = True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def __A ( self : int ) -> str:
        """Shape check for the image-classification head."""
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )

    @slow
    def __A ( self : List[Any] ) -> Dict:
        """Smoke-test loading the first pretrained checkpoint from the Hub."""
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase = MobileNetVaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )


# prepare_img: fixture image (COCO cats) used by the integration test below.
def __a ( ) -> Optional[Any]:
    lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """Integration test: pretrained google/mobilenet_v1_1.0_224 on a real image."""

    @cached_property
    def __A ( self : Optional[Any] ) -> Dict:
        """The pretrained image processor (None when vision extras are absent)."""
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
        )

    @slow
    def __A ( self : Dict ) -> Union[str, Any]:
        """Verify logits shape (1, 1001) and the first three logit values
        against known-good reference numbers."""
        lowerCAmelCase = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.default_image_processor
        lowerCAmelCase = prepare_img()
        lowerCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
        # verify the logits
        lowerCAmelCase = torch.Size((1, 1_0_0_1) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
713
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets lowercase : Union[str, Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' lowercase : Optional[int] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). 
The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' lowercase : int = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def __a ( A__ , A__ , A__ , A__ , A__ = None , A__ = False , ) -> List[str]: if label_map is not None: for old_id, new_id in label_map.items(): lowerCAmelCase = new_id # turn into Numpy arrays lowerCAmelCase = np.array(A__ ) lowerCAmelCase = np.array(A__ ) if reduce_labels: lowerCAmelCase = 255 lowerCAmelCase = label - 1 lowerCAmelCase = 255 lowerCAmelCase = label != ignore_index lowerCAmelCase = np.not_equal(A__ , A__ ) lowerCAmelCase = pred_label[mask] lowerCAmelCase = np.array(A__ )[mask] lowerCAmelCase = pred_label[pred_label == label] lowerCAmelCase = np.histogram(A__ , bins=A__ , range=(0, num_labels - 1) )[0] lowerCAmelCase = np.histogram(A__ , bins=A__ , range=(0, num_labels - 1) )[0] lowerCAmelCase = np.histogram(A__ , bins=A__ , range=(0, num_labels - 1) )[0] lowerCAmelCase = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __a ( A__ , A__ , A__ , A__ , A__ = None , A__ = False , ) -> Optional[int]: lowerCAmelCase = np.zeros((num_labels,) , dtype=np.floataa ) lowerCAmelCase = np.zeros((num_labels,) , dtype=np.floataa ) lowerCAmelCase = np.zeros((num_labels,) , dtype=np.floataa ) lowerCAmelCase = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(A__ , A__ ): lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = intersect_and_union( A__ , A__ , A__ , A__ , A__ , A__ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __a ( A__ , A__ , A__ , A__ , A__ = 
None , A__ = None , A__ = False , ) -> Dict: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = total_intersect_and_union( A__ , A__ , A__ , A__ , A__ , A__ ) # compute metrics lowerCAmelCase = {} lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum() lowerCAmelCase = total_area_intersect / total_area_union lowerCAmelCase = total_area_intersect / total_area_label lowerCAmelCase = np.nanmean(A__ ) lowerCAmelCase = np.nanmean(A__ ) lowerCAmelCase = all_acc lowerCAmelCase = iou lowerCAmelCase = acc if nan_to_num is not None: lowerCAmelCase = {metric: np.nan_to_num(A__ , nan=A__ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def __A ( self : Tuple ) -> Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), } ) , reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ] , ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Dict[int, int]] = None , SCREAMING_SNAKE_CASE : bool = False , ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase = mean_iou( results=SCREAMING_SNAKE_CASE , gt_seg_maps=SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , ignore_index=SCREAMING_SNAKE_CASE , nan_to_num=SCREAMING_SNAKE_CASE , label_map=SCREAMING_SNAKE_CASE , reduce_labels=SCREAMING_SNAKE_CASE , ) return 
iou_result
159
0
'''simple docstring''' def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = [], [] while len(lowerCAmelCase_ ) > 1: SCREAMING_SNAKE_CASE_ : int = min(lowerCAmelCase_ ), max(lowerCAmelCase_ ) start.append(lowerCAmelCase_ ) end.append(lowerCAmelCase_ ) collection.remove(lowerCAmelCase_ ) collection.remove(lowerCAmelCase_ ) end.reverse() return start + collection + end if __name__ == "__main__": snake_case_ = input('Enter numbers separated by a comma:\n').strip() snake_case_ = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
421
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __magic_name__ = 1.054_571_817E-34 # unit of ℏ : J * s __magic_name__ = 3E8 # unit of c : m * s^-1 def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' if (force, area, distance).count(0) != 1: raise ValueError("One and only one argument must be 0") if force < 0: raise ValueError("Magnitude of force can not be negative") if distance < 0: raise ValueError("Distance can not be negative") if area < 0: raise ValueError("Area can not be negative") if force == 0: lowerCamelCase_ : Dict = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCamelCase_ : List[str] = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCamelCase_ : Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("One and only one argument must be 0") # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
250
0
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase ( _A : Dict , _A : Any , _A : Dict=None )-> Tuple: """simple docstring""" assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match""" A__ = nn.Parameter(_A ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match""" A__ = nn.Parameter(_A ) def UpperCamelCase ( _A : int , _A : Optional[int] , _A : Any )-> Optional[int]: """simple docstring""" A__ = np.asarray(weights[0] ) A__ = np.asarray(weights[1] ) A__ = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.output.dense , torch.tensor(_A ).view(-1 , _A ).contiguous().transpose(0 , 1 ) , ) def UpperCamelCase ( _A : Optional[Any] , _A : Tuple , _A : List[Any] )-> Optional[Any]: """simple docstring""" A__ = np.asarray(weights[0] ) A__ = np.asarray(weights[1] ) A__ = np.asarray(weights[2] ) A__ = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.self_attention.key , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_A ).transpose(1 , 2 ).contiguous().view(-1 , _A ) , ) set_param( torch_layer.output.dense , torch.tensor(_A ).view(-1 , _A ).contiguous().transpose(0 , 1 ) , ) def UpperCamelCase ( _A : Dict , _A : str , _A : List[Any] )-> Any: """simple docstring""" A__ = weights[0][0][0] A__ = np.asarray(layer_norm_a[0] ) A__ = np.asarray(layer_norm_a[1] ) set_param( 
torch_block.attention.layer_norm , torch.tensor(_A ) , torch.tensor(_A ) , ) # lsh weights + output A__ = weights[0][1] if len(_A ) < 4: set_layer_weights_in_torch_lsh(_A , torch_block.attention , _A ) else: set_layer_weights_in_torch_local(_A , torch_block.attention , _A ) # intermediate weighs A__ = weights[2][0][1][2] # Chunked Feed Forward if len(_A ) == 4: A__ = intermediate_weights[2] # layernorm 2 A__ = np.asarray(intermediate_weights[0][0] ) A__ = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(_A ) , torch.tensor(_A ) , ) # intermediate dense A__ = np.asarray(intermediate_weights[1][0] ) A__ = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(_A ).transpose(0 , 1 ).contiguous() , torch.tensor(_A ) , ) # intermediate out A__ = np.asarray(intermediate_weights[4][0] ) A__ = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(_A ).transpose(0 , 1 ).contiguous() , torch.tensor(_A ) , ) def UpperCamelCase ( _A : List[Any] , _A : int , _A : List[Any] )-> Union[str, Any]: """simple docstring""" A__ = torch_model.reformer # word embeds A__ = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(_A ) , ) if isinstance(weights[3] , _A ): A__ = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): A__ = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"""{position_embeddings[emb_idx]} emb does not match""" A__ = nn.Parameter(torch.tensor(_A ) ) A__ = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( _A ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): A__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(_A , _A 
, _A ) # output layer norm A__ = np.asarray(weights[7][0] ) A__ = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(_A ) , torch.tensor(_A ) , ) # output embeddings A__ = np.asarray(weights[9][0] ) A__ = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(_A ).transpose(0 , 1 ).contiguous() , torch.tensor(_A ) , ) def UpperCamelCase ( _A : Tuple , _A : Any , _A : Any )-> List[Any]: """simple docstring""" A__ = ReformerConfig.from_json_file(_A ) print(f"""Building PyTorch model from configuration: {config}""" ) A__ = ReformerModelWithLMHead(_A ) with open(_A , "rb" ) as f: A__ = pickle.load(_A )["weights"] set_model_weights_in_torch(_A , _A , config.hidden_size ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , _A ) if __name__ == "__main__": UpperCAmelCase_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCAmelCase_ : List[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
708
def UpperCamelCase ( _A : int = 50 )-> int: """simple docstring""" A__ = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F'''{solution() = }''')
232
0
import argparse import importlib from pathlib import Path # Test all the extensions added in the setup __SCREAMING_SNAKE_CASE : Dict = [ """kernels/rwkv/wkv_cuda.cu""", """kernels/rwkv/wkv_op.cpp""", """kernels/deformable_detr/ms_deform_attn.h""", """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""", """models/graphormer/algos_graphormer.pyx""", ] def UpperCAmelCase__ ( __magic_name__ : List[Any] ): '''simple docstring''' for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.') __SCREAMING_SNAKE_CASE : int = parser.parse_args() if args.check_lib: __SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module('transformers') __SCREAMING_SNAKE_CASE : str = Path(transformers_module.__file__).parent else: __SCREAMING_SNAKE_CASE : Union[str, Any] = Path.cwd() / """build/lib/transformers""" if not test_custom_files_are_present(transformers_path): raise ValueError('The built release does not contain the custom files. Fix this before going further!')
348
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _A: List[str] = logging.get_logger(__name__) _A: Optional[Any] = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class UpperCAmelCase ( UpperCAmelCase_ ): _A : List[Any] = """instructblip_vision_model""" def __init__( self , __A=1_408 , __A=6_144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=1E-6 , __A=0.0 , __A=1E-10 , __A=True , **__A , ): super().__init__(**__A ) __UpperCAmelCase = hidden_size __UpperCAmelCase = intermediate_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = patch_size __UpperCAmelCase = image_size __UpperCAmelCase = initializer_range __UpperCAmelCase = attention_dropout __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = hidden_act __UpperCAmelCase = qkv_bias @classmethod def __lowerCamelCase ( cls , __A , **__A ): cls._set_token_in_kwargs(__A ) __UpperCAmelCase , __UpperCAmelCase = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCAmelCase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(__A , **__A ) class UpperCAmelCase ( UpperCAmelCase_ ): _A : Optional[Any] = """instructblip_qformer""" def __init__( self , __A=30_522 , __A=768 , __A=12 , __A=12 , __A=3_072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1_408 , **__A , ): super().__init__(pad_token_id=__A , **__A ) __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = hidden_act __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = position_embedding_type __UpperCAmelCase = cross_attention_frequency __UpperCAmelCase = encoder_hidden_size @classmethod def __lowerCamelCase ( cls , __A , **__A ): cls._set_token_in_kwargs(__A ) __UpperCAmelCase , __UpperCAmelCase = cls.get_config_dict(__A , **__A ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __UpperCAmelCase = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(__A , **__A ) class UpperCAmelCase ( UpperCAmelCase_ ): _A : Dict = """instructblip""" _A : List[Any] = True def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ): super().__init__(**__A ) if vision_config is None: __UpperCAmelCase = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' 
) if qformer_config is None: __UpperCAmelCase = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: __UpperCAmelCase = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' ) __UpperCAmelCase = InstructBlipVisionConfig(**__A ) __UpperCAmelCase = InstructBlipQFormerConfig(**__A ) __UpperCAmelCase = text_config['model_type'] if 'model_type' in text_config else 'opt' __UpperCAmelCase = CONFIG_MAPPING[text_model_type](**__A ) __UpperCAmelCase = self.text_config.tie_word_embeddings __UpperCAmelCase = self.text_config.is_encoder_decoder __UpperCAmelCase = num_query_tokens __UpperCAmelCase = self.vision_config.hidden_size __UpperCAmelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __UpperCAmelCase = 1.0 __UpperCAmelCase = 0.0_2 @classmethod def __lowerCamelCase ( cls , __A , __A , __A , **__A , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , ) def __lowerCamelCase ( self ): __UpperCAmelCase = copy.deepcopy(self.__dict__ ) __UpperCAmelCase = self.vision_config.to_dict() __UpperCAmelCase = self.qformer_config.to_dict() __UpperCAmelCase = self.text_config.to_dict() __UpperCAmelCase = self.__class__.model_type return output
126
0
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path _SCREAMING_SNAKE_CASE = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) _SCREAMING_SNAKE_CASE = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} _SCREAMING_SNAKE_CASE = """zero2""" _SCREAMING_SNAKE_CASE = """zero3""" _SCREAMING_SNAKE_CASE = [ZEROa, ZEROa] def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param snake_case_ : Tuple = parameterized.to_safe_name('_'.join(str(__a ) for x in param.args ) ) return f"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test _SCREAMING_SNAKE_CASE = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( snake_case_ ): @parameterized.expand(_A , name_func=_A ) def UpperCAmelCase_ ( self : Tuple , _A : Optional[Any] , _A : Optional[int] ) -> Dict: """simple docstring""" self.run_and_check( stage=_A , model=_A , distributed=_A , fpaa=_A , ) 
@require_torch_multi_gpu @parameterized.expand(_A , name_func=_A ) def UpperCAmelCase_ ( self : int , _A : Any , _A : int ) -> Dict: """simple docstring""" self.run_and_check( stage=_A , model=_A , distributed=_A , fpaa=_A , ) @parameterized.expand(_A , name_func=_A ) def UpperCAmelCase_ ( self : str , _A : List[str] , _A : str ) -> Tuple: """simple docstring""" self.run_and_check( stage=_A , model=_A , distributed=_A , fpaa=_A , ) @require_torch_multi_gpu @parameterized.expand(_A , name_func=_A ) def UpperCAmelCase_ ( self : Optional[int] , _A : int , _A : Union[str, Any] ) -> List[Any]: """simple docstring""" self.run_and_check( stage=_A , model=_A , distributed=_A , fpaa=_A , ) def UpperCAmelCase_ ( self : Any , _A : Union[str, Any] ) -> Any: """simple docstring""" pass def UpperCAmelCase_ ( self : List[str] , _A : str , _A : str , _A : int = 10 , _A : bool = True , _A : bool = True , _A : bool = True , ) -> Any: """simple docstring""" snake_case_ : Dict = models[model] snake_case_ : str = self.run_trainer( stage=_A , model_name=_A , eval_steps=_A , num_train_epochs=1 , distributed=_A , fpaa=_A , ) self.do_checks(_A ) return output_dir def UpperCAmelCase_ ( self : Any , _A : str , _A : str , _A : int = 10 , _A : int = 1 , _A : bool = True , _A : bool = True , ) -> Dict: """simple docstring""" snake_case_ : str = self.get_auto_remove_tmp_dir('./xxx' , after=_A ) snake_case_ : Tuple = F""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(_A )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['--fp16'] ) 
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files snake_case_ : Any = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() snake_case_ : str = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] snake_case_ : int = self.get_launcher(_A ) snake_case_ : List[str] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_A , env=self.get_env() ) return output_dir def UpperCAmelCase_ ( self : Optional[Any] , _A : Optional[Any]=False ) -> List[str]: """simple docstring""" snake_case_ : str = min(2 , get_gpu_count() ) if distributed else 1 return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
534
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): __magic_name__: Union[str, Any] = MODEL_FOR_MASKED_LM_MAPPING __magic_name__: Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING def UpperCAmelCase_ ( self : str ) -> str: """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" snake_case_ : int = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' ) snake_case_ : Optional[Any] = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ {'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 38015, 'token_str': ' grouped'}, {'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 25506, 'token_str': ' accuser'}, ] , ) snake_case_ : int = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ { 'sequence': 'The largest city in France is grouped', 'score': 2.1E-05, 'token': 38015, 'token_str': ' grouped', }, { 'sequence': 'The largest city in France is accuser', 'score': 2.1E-05, 'token': 25506, 'token_str': ' accuser', }, ] , ) snake_case_ : Any = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ {'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Patrick', 'score': 2E-05, 
'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" snake_case_ : Tuple = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' ) snake_case_ : Tuple = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ {'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul'}, {'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) snake_case_ : int = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ { 'sequence': 'The largest city in France is Maul', 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul', }, {'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) snake_case_ : Any = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ {'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 2E-05, 'token': 2941, 'token_str': ' Te'}, {'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'}, ] , ) snake_case_ : List[Any] = unmasker('My name is <mask> <mask>' , top_k=2 ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ [ { 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is Maul<mask></s>', }, {'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'}, ], [ { 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is<mask> Maul</s>', }, {'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'}, ], ] , ) @require_torch_gpu def 
UpperCAmelCase_ ( self : str ) -> Any: """simple docstring""" snake_case_ : Union[str, Any] = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' ) # convert model to fp16 pipe.model.half() snake_case_ : Tuple = pipe('Paris is the [MASK] of France.' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(_A , _A ) @slow @require_torch def UpperCAmelCase_ ( self : str ) -> List[Any]: """simple docstring""" snake_case_ : Optional[Any] = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' ) self.run_large_test(_A ) @slow @require_tf def UpperCAmelCase_ ( self : Optional[int] ) -> int: """simple docstring""" snake_case_ : Dict = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' ) self.run_large_test(_A ) def UpperCAmelCase_ ( self : Dict , _A : List[Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : List[Any] = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_A ) , [ {'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'}, {'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'}, ] , ) snake_case_ : List[Any] = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_A ) , [ { 'sequence': 'The largest city in France is Paris', 'score': 0.2_5_1, 'token': 2201, 'token_str': ' Paris', }, { 'sequence': 'The largest city in France is Lyon', 'score': 0.2_1_4, 'token': 12790, 'token_str': ' Lyon', }, ] , ) snake_case_ : Tuple = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_A ) , [ {'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' 
Clara'}, {'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def UpperCAmelCase_ ( self : Optional[int] ) -> Any: """simple docstring""" snake_case_ : Optional[Any] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' ) snake_case_ : Tuple = None snake_case_ : str = None self.run_pipeline_test(_A , [] ) @require_tf def UpperCAmelCase_ ( self : Tuple ) -> Dict: """simple docstring""" snake_case_ : List[Any] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' ) snake_case_ : List[str] = None snake_case_ : List[str] = None self.run_pipeline_test(_A , [] ) def UpperCAmelCase_ ( self : List[str] , _A : List[Any] , _A : Tuple , _A : Optional[int] ) -> Optional[Any]: """simple docstring""" if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' ) snake_case_ : Dict = FillMaskPipeline(model=_A , tokenizer=_A ) snake_case_ : Optional[Any] = [ F"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def UpperCAmelCase_ ( self : Optional[Any] , _A : str , _A : List[Any] ) -> int: """simple docstring""" snake_case_ : Optional[int] = fill_masker.tokenizer snake_case_ : List[Any] = fill_masker.model snake_case_ : int = fill_masker( F"""This is a {tokenizer.mask_token}""" , ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : Dict = fill_masker([F"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( _A , [ {'sequence': 
ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : Optional[int] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( _A , [ [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ], [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ], ] , ) with self.assertRaises(_A ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(_A ): fill_masker('This is' ) self.run_test_top_k(_A , _A ) self.run_test_targets(_A , _A ) self.run_test_top_k_targets(_A , _A ) self.fill_mask_with_duplicate_targets_and_top_k(_A , _A ) self.fill_mask_with_multiple_masks(_A , _A ) def UpperCAmelCase_ ( self : Optional[Any] , _A : Any , _A : Optional[int] ) -> Any: """simple docstring""" snake_case_ : Dict = tokenizer.get_vocab() snake_case_ : List[Any] = sorted(vocab.keys() )[:2] # Pipeline 
argument snake_case_ : Dict = FillMaskPipeline(model=_A , tokenizer=_A , targets=_A ) snake_case_ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : List[str] = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _A ) snake_case_ : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_A ) ) # Call argument snake_case_ : Dict = FillMaskPipeline(model=_A , tokenizer=_A ) snake_case_ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=_A ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : Any = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _A ) snake_case_ : Tuple = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_A ) ) # Score equivalence snake_case_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=_A ) snake_case_ : Any = [top_mask['token_str'] for top_mask in outputs] snake_case_ : Optional[Any] = [top_mask['score'] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_A ) == set(_A ): snake_case_ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=_A ) snake_case_ : Union[str, Any] = [top_mask['score'] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) ) # Raises with invalid with self.assertRaises(_A ): snake_case_ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(_A ): snake_case_ : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[''] ) with self.assertRaises(_A ): snake_case_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets='' ) def UpperCAmelCase_ ( self : Tuple , _A : Any , _A : Optional[Any] ) -> Any: """simple docstring""" snake_case_ : str = FillMaskPipeline(model=_A , tokenizer=_A , top_k=2 ) snake_case_ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : Any = FillMaskPipeline(model=_A , tokenizer=_A ) snake_case_ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) ) def UpperCAmelCase_ ( self : Tuple , _A : Any , _A : Dict ) -> str: """simple docstring""" snake_case_ : str = tokenizer.get_vocab() snake_case_ : Tuple = FillMaskPipeline(model=_A , tokenizer=_A ) # top_k=2, ntargets=3 snake_case_ : str = sorted(vocab.keys() )[:3] snake_case_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , 
top_k=2 , targets=_A ) # If we use the most probably targets, and filter differently, we should still # have the same results snake_case_ : Any = [el['token_str'] for el in sorted(_A , key=lambda _A : x["score"] , reverse=_A )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(_A ).issubset(_A ): snake_case_ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_A ) # They should yield exactly the same result self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) ) def UpperCAmelCase_ ( self : str , _A : Dict , _A : Tuple ) -> Dict: """simple docstring""" snake_case_ : Tuple = FillMaskPipeline(model=_A , tokenizer=_A ) snake_case_ : List[Any] = tokenizer.get_vocab() # String duplicates + id duplicates snake_case_ : str = sorted(vocab.keys() )[:3] snake_case_ : Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]] snake_case_ : str = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=_A , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(_A ) , 3 ) def UpperCAmelCase_ ( self : List[str] , _A : str , _A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" snake_case_ : Union[str, Any] = FillMaskPipeline(model=_A , tokenizer=_A ) snake_case_ : List[str] = fill_masker( F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _A , [ [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ], [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ], [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 
'token_str': ANY(_A )}, ], ] , )
534
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __UpperCamelCase ( self ): snake_case__ : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) ) self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) ) class __snake_case : '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ): snake_case__ : List[str] = parent snake_case__ : Tuple = batch_size snake_case__ : Union[str, Any] = image_size snake_case__ : List[Any] = patch_sizes snake_case__ : Optional[int] = patch_stride snake_case__ : 
Optional[Any] = patch_padding snake_case__ : Any = is_training snake_case__ : int = use_labels snake_case__ : Dict = num_labels snake_case__ : Optional[Any] = num_channels snake_case__ : Optional[Any] = embed_dim snake_case__ : Optional[int] = num_heads snake_case__ : Optional[int] = stride_kv snake_case__ : int = depth snake_case__ : Optional[Any] = cls_token snake_case__ : List[Any] = attention_drop_rate snake_case__ : Union[str, Any] = initializer_range snake_case__ : List[Any] = layer_norm_eps def __UpperCamelCase ( self ): snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : List[Any] = None if self.use_labels: # create a random int32 tensor of given shape snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ : List[str] = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self ): return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) snake_case__ : Tuple = (self.image_size, self.image_size) snake_case__ , snake_case__ : str = image_size[0], image_size[1] for i in range(len(self.depth ) ): snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / 
self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): snake_case__ : Any = self.num_labels snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE ) snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () lowerCamelCase__ = ( {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def __UpperCamelCase ( self ): snake_case__ : Optional[Any] = TFCvtModelTester(self ) snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 ) def __UpperCamelCase ( self ): self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() 
@unittest.skip(reason="""Cvt does not output attentions""" ) def __UpperCamelCase ( self ): pass @unittest.skip(reason="""Cvt does not use inputs_embeds""" ) def __UpperCamelCase ( self ): pass @unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def __UpperCamelCase ( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) def __UpperCamelCase ( self ): super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) @slow def __UpperCamelCase ( self ): super().test_keras_fit() @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" ) def __UpperCamelCase ( self ): snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" ) tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("""float32""" ) def __UpperCamelCase ( self ): snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE ) snake_case__ : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Optional[Any] = [*signature.parameters.keys()] snake_case__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( self ): def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE ) ) snake_case__ : Optional[int] = outputs.hidden_states snake_case__ : Tuple = len(self.model_tester.depth ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : List[Any] = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[str] = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( self ): snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( self ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def __UpperCamelCase ( self ): for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def UpperCamelCase__ ( ) -> str: '''simple docstring''' snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def __UpperCamelCase ( self ): return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __UpperCamelCase ( self ): snake_case__ : Optional[Any] = 
TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case__ : Union[str, Any] = self.default_image_processor snake_case__ : int = prepare_img() snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) # forward pass snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ) # verify the logits snake_case__ : str = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
38
"""SegFormer model configuration.

De-obfuscated: the original chunk used `__SCREAMING_SNAKE_CASE` (undefined at
module level) as both base classes and the warning category, and defined two
classes under the same name `__snake_case` — the module could not even be
imported.  The canonical names are restored from the file's own import block
(`PretrainedConfig`, `OnnxConfig`) and the archive map.
"""

import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# Module-level logger, per the HF convention.
logger = logging.get_logger(__name__)

# Checkpoint -> config URL map for SegFormer.
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    """Configuration for a SegFormer model.

    Holds the hyper-parameters of the hierarchical transformer encoder
    (per-stage depths, spatial-reduction ratios, hidden sizes, patch sizes,
    strides, attention heads, MLP ratios) plus the all-MLP decode-head size
    and the semantic-segmentation loss ignore index.
    """

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # `reshape_last_stage=False` is a deprecated escape hatch; warn loudly.
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Default True; only a kwargs override can disable the legacy behaviour.
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single NCHW pixel input with fully dynamic axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating the exported model against PyTorch.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
38
1
"""Check whether two strings are anagrams of each other."""

from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True iff *first_str* and *second_str* are anagrams.

    The comparison is case-insensitive and ignores all spaces.

    >>> check_anagrams("Silent", "Listen")
    True
    >>> check_anagrams("This is a string", "Is this a string")
    True
    >>> check_anagrams("There", "Their")
    False
    """
    # Normalize: lowercase and drop every space (not just leading/trailing).
    first_str = first_str.lower().strip().replace(" ", "")
    second_str = second_str.lower().strip().replace(" ", "")

    # Strings of different lengths can never be anagrams.
    if len(first_str) != len(second_str):
        return False

    # Tally characters: +1 for the first string, -1 for the second.
    # An anagram pair leaves every counter at exactly zero.
    # (Bug fix: the obfuscated original seeded ``defaultdict`` with a string
    # parameter instead of ``int``, and reused one parameter name twice,
    # which is a SyntaxError.)
    count: defaultdict = defaultdict(int)
    for char_a, char_b in zip(first_str, second_str):
        count[char_a] += 1
        count[char_b] -= 1
    return all(freq == 0 for freq in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
701
"""Maclaurin-series approximations of sine and cosine."""
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """
    Approximate sin(theta) with the Maclaurin series.

    :param theta: angle in radians (int or float)
    :param accuracy: number of series terms; must be a positive int
    :return: the series approximation of sin(theta)
    :raises ValueError: if theta is not numeric or accuracy is not a positive int
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Range-reduce theta into [0, 2*pi) so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    # sin(x) = sum_{r>=0} (-1)^r * x^(2r+1) / (2r+1)!
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """
    Approximate cos(theta) with the Maclaurin series.

    :param theta: angle in radians (int or float)
    :param accuracy: number of series terms; must be a positive int
    :return: the series approximation of cos(theta)
    :raises ValueError: if theta is not numeric or accuracy is not a positive int
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    # Range-reduce theta into [0, 2*pi) so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    # cos(x) = sum_{r>=0} (-1)^r * x^(2r) / (2r)!
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
170
0
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
363
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCAmelCase : '''simple docstring''' def __init__( self , snake_case_ , snake_case_=3 , snake_case_=32 , snake_case_=3 , snake_case_=10 , snake_case_=[10, 20, 30, 40] , snake_case_=[1, 1, 2, 1] , snake_case_=True , snake_case_=True , snake_case_="relu" , snake_case_=3 , snake_case_=None , ): '''simple docstring''' A__ : Tuple = parent A__ : int = batch_size A__ : List[Any] = image_size A__ : Any = num_channels A__ : Tuple = embeddings_size A__ : Union[str, Any] = hidden_sizes A__ : Tuple = depths A__ : Union[str, Any] = is_training A__ : Any = use_labels A__ : Optional[Any] = hidden_act A__ : str = num_labels A__ : Optional[Any] = scope A__ : List[Any] = len(snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ : str = None if self.use_labels: A__ : Tuple = ids_tensor([self.batch_size] , self.num_labels ) A__ : Dict = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , 
hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : Union[str, Any] = TFRegNetModel(config=snake_case_ ) A__ : Optional[int] = model(snake_case_ , training=snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : Optional[int] = self.num_labels A__ : Tuple = TFRegNetForImageClassification(snake_case_ ) A__ : Optional[int] = model(snake_case_ , labels=snake_case_ , training=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self ): '''simple docstring''' A__ : List[str] = self.prepare_config_and_inputs() A__ , A__ , A__ : Optional[Any] = config_and_inputs A__ : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __UpperCAmelCase (__A , __A , unittest.TestCase ): '''simple docstring''' _UpperCamelCase : str = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () _UpperCamelCase : Tuple = ( {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) _UpperCamelCase : str = False _UpperCamelCase : int = False _UpperCamelCase : Tuple = False _UpperCamelCase : Any = False _UpperCamelCase : Union[str, Any] = False def lowerCamelCase ( self ): '''simple docstring''' A__ : Dict = TFRegNetModelTester(self ) A__ : str = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or 
len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) @slow def lowerCamelCase ( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : int = model_class(snake_case_ ) A__ : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ : Union[str, Any] = [*signature.parameters.keys()] A__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ): A__ : str = model_class(snake_case_ ) A__ : Union[str, Any] = model(**self._prepare_for_class(snake_case_ , snake_case_ ) , training=snake_case_ ) A__ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common() A__ : Optional[int] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: A__ : Optional[Any] = layer_type A__ : Dict = True 
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ : List[Any] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(snake_case_ , snake_case_ , snake_case_ , snake_case_={} ): A__ : List[str] = model(snake_case_ , return_dict=snake_case_ , **snake_case_ ) A__ : Union[str, Any] = model(snake_case_ , return_dict=snake_case_ , **snake_case_ ).to_tuple() def recursive_check(snake_case_ , snake_case_ ): if isinstance(snake_case_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case_ , snake_case_ ): recursive_check(snake_case_ , snake_case_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(snake_case_ , snake_case_ ) ) , msg=( """Tuple and dict output are not equal. 
Difference:""" F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(snake_case_ , snake_case_ ) for model_class in self.all_model_classes: A__ : Any = model_class(snake_case_ ) A__ : Dict = self._prepare_for_class(snake_case_ , snake_case_ ) A__ : List[Any] = self._prepare_for_class(snake_case_ , snake_case_ ) check_equivalence(snake_case_ , snake_case_ , snake_case_ ) A__ : Tuple = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) A__ : List[str] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) check_equivalence(snake_case_ , snake_case_ , snake_case_ ) A__ : Optional[int] = self._prepare_for_class(snake_case_ , snake_case_ ) A__ : List[str] = self._prepare_for_class(snake_case_ , snake_case_ ) check_equivalence(snake_case_ , snake_case_ , snake_case_ , {"""output_hidden_states""": True} ) A__ : Dict = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) A__ : Union[str, Any] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) check_equivalence(snake_case_ , snake_case_ , snake_case_ , {"""output_hidden_states""": True} ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Any = TFRegNetModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def _A( ): A__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __UpperCAmelCase (unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase ( self ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else 
None ) @slow def lowerCamelCase ( self ): '''simple docstring''' A__ : int = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A__ : str = self.default_image_processor A__ : Any = prepare_img() A__ : Tuple = image_processor(images=snake_case_ , return_tensors="""tf""" ) # forward pass A__ : List[Any] = model(**snake_case_ , training=snake_case_ ) # verify the logits A__ : Tuple = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) A__ : Tuple = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , snake_case_ , atol=1E-4 )
363
1
"""simple docstring""" import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union A__ : Optional[int] = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""") @total_ordering @dataclass class _lowercase : '''simple docstring''' _A = 42 _A = None _A = None _A = None _A = None def lowerCAmelCase__ ( self )-> Optional[Any]: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = _str_to_version_tuple(self.version_str ) def __repr__( self )-> Dict: return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}" @property def lowerCAmelCase__ ( self )-> Dict: return self.major, self.minor, self.patch def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple: if isinstance(__UpperCamelCase , __UpperCamelCase ): return Version(__UpperCamelCase ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): return other raise TypeError(F"{other} (type {type(__UpperCamelCase )}) cannot be compared to version." ) def __eq__( self , __UpperCamelCase )-> Optional[int]: try: UpperCAmelCase__ : Any = self._validate_operand(__UpperCamelCase ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , __UpperCamelCase )-> Union[str, Any]: UpperCAmelCase__ : List[str] = self._validate_operand(__UpperCamelCase ) return self.tuple < other.tuple def __hash__( self )-> Union[str, Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowerCAmelCase__ ( cls , __UpperCamelCase )-> Optional[Any]: UpperCAmelCase__ : Union[str, Any] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowerCAmelCase__ ( self )-> Union[str, Any]: return self.version_str def a__ ( lowerCAmelCase : Dict ): '''simple docstring''' UpperCAmelCase__ : str = _VERSION_REG.match(_UpperCamelCase ) if not res: raise ValueError(F"Invalid version '{version_str}'. 
Format should be x.y.z with {{x,y,z}} being digits." ) return tuple(int(_UpperCamelCase ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] ) def a__ ( lowerCAmelCase : Any ): '''simple docstring''' return ".".join(str(_UpperCamelCase ) for v in version_tuple )
716
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def a__ ( lowerCAmelCase : List[str] ): '''simple docstring''' def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ): UpperCAmelCase__ : Optional[int] = timeit.default_timer() UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase ) UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime return delta UpperCAmelCase__ : int = func.__name__ return wrapper def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ): '''simple docstring''' UpperCAmelCase__ : str = [] UpperCAmelCase__ : Optional[Any] = seq_shapes or {} for i in range(lowerCAmelCase ): UpperCAmelCase__ : int = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(lowerCAmelCase , _ArrayXD ): UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(lowerCAmelCase , datasets.Value ): if v.dtype == "string": UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged." 
else: UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(lowerCAmelCase , datasets.Sequence ): while isinstance(lowerCAmelCase , datasets.Sequence ): UpperCAmelCase__ : List[str] = v.feature UpperCAmelCase__ : Optional[int] = seq_shapes[k] UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype ) UpperCAmelCase__ : Union[str, Any] = data dummy_data.append((i, example) ) return dummy_data def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ): '''simple docstring''' UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase ) with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer: for key, record in dummy_data: UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase ) writer.write(lowerCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize() if not num_final_examples == num_examples: raise ValueError( F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." ) UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) ) return dataset
660
0
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: __lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=lowerCamelCase__ , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=lowerCamelCase__ , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=lowerCamelCase__ , default=4_2 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=lowerCamelCase__ , default=0 , help='cuda_id.' , ) __lowerCamelCase : Dict = parser.parse_args() return args def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str: if not len(lowerCamelCase__ ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' 
) __lowerCamelCase , __lowerCamelCase : Dict = imgs[0].size __lowerCamelCase : Dict = Image.new('RGB' , size=(cols * w, rows * h) ) __lowerCamelCase , __lowerCamelCase : Any = grid.size for i, img in enumerate(lowerCamelCase__ ): grid.paste(lowerCamelCase__ , box=(i % cols * w, i // cols * h) ) return grid def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__="robotic cat with wings" , lowerCamelCase__=7.5 , lowerCamelCase__=5_0 , lowerCamelCase__=1 , lowerCamelCase__=4_2 , ) -> List[Any]: __lowerCamelCase : Dict = torch.Generator(pipeline.device ).manual_seed(lowerCamelCase__ ) __lowerCamelCase : List[str] = pipeline( lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , ).images __lowerCamelCase : Any = int(math.sqrt(lowerCamelCase__ ) ) __lowerCamelCase : Optional[int] = image_grid(lowerCamelCase__ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images a =parse_args() # Load models and create wrapper for stable diffusion a =CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""") a =CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""") a =AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""") a =UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""") a =StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) a =lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")): a =load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, """unet""", unet) else: a =unet.to(torch.device("""cuda""", args.cuda_id)) a =pipeline.to(unet.device) a , a =generate_images(pipeline, prompt=args.caption, 
num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split())))) a =os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
652
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ={"""vocab_file""": """vocab.txt"""} a ={ """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } a ={ """openbmb/cpm-ant-10b""": 1024, } def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: __lowerCamelCase : int = collections.OrderedDict() with open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as reader: __lowerCamelCase : Optional[int] = reader.readlines() for index, token in enumerate(lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = token.rstrip('\n' ) __lowerCamelCase : Union[str, Any] = index return vocab class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int]="<unk>" ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_0_0): __lowerCamelCase : str = vocab __lowerCamelCase : Dict = unk_token __lowerCamelCase : int = max_input_chars_per_word def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]): __lowerCamelCase : int = list(SCREAMING_SNAKE_CASE__) if len(SCREAMING_SNAKE_CASE__) > self.max_input_chars_per_word: return [self.unk_token] __lowerCamelCase : Tuple = 0 __lowerCamelCase : str = [] while start < len(SCREAMING_SNAKE_CASE__): __lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = None while start < end: __lowerCamelCase : Any = ''.join(chars[start:end]) if substr in self.vocab: __lowerCamelCase : Optional[Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token) start += 1 else: sub_tokens.append(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = end return sub_tokens class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = 
VOCAB_FILES_NAMES _UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : str = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase : Optional[int] = False def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple="<d>" ,SCREAMING_SNAKE_CASE__ : Tuple="</d>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" ,SCREAMING_SNAKE_CASE__ : str="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<unk>" ,SCREAMING_SNAKE_CASE__ : List[Any]="</n>" ,SCREAMING_SNAKE_CASE__ : int="</_>" ,SCREAMING_SNAKE_CASE__ : List[Any]="left" ,**SCREAMING_SNAKE_CASE__ : List[str] ,): requires_backends(self ,['jieba']) super().__init__( bod_token=SCREAMING_SNAKE_CASE__ ,eod_token=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,line_token=SCREAMING_SNAKE_CASE__ ,space_token=SCREAMING_SNAKE_CASE__ ,padding_side=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Optional[Any] = bod_token __lowerCamelCase : Dict = eod_token __lowerCamelCase : Any = load_vocab(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = self.encoder[space_token] __lowerCamelCase : Dict = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] __lowerCamelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1])) __lowerCamelCase : int = {v: k for k, v in self.encoder.items()} __lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token) @property def lowerCAmelCase ( self : List[Any]): return self.encoder[self.bod_token] @property def lowerCAmelCase ( self : Tuple): return self.encoder[self.eod_token] @property def lowerCAmelCase ( self : Union[str, Any]): return self.encoder["\n"] @property def lowerCAmelCase ( self : 
str): return len(self.encoder) def lowerCAmelCase ( self : str): return dict(self.encoder ,**self.added_tokens_encoder) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : Any = [] for x in jieba.cut(SCREAMING_SNAKE_CASE__ ,cut_all=SCREAMING_SNAKE_CASE__): output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)) return output_tokens def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Tuple = [i for i in token_ids if i >= 0] __lowerCamelCase : str = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]): return token in self.encoder def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]): return "".join(SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.encoder.get(SCREAMING_SNAKE_CASE__ ,self.encoder.get(self.unk_token)) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.decoder.get(SCREAMING_SNAKE_CASE__ ,self.unk_token) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): if os.path.isdir(SCREAMING_SNAKE_CASE__): __lowerCamelCase : Any = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: __lowerCamelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory __lowerCamelCase : Any = 0 if " " in self.encoder: __lowerCamelCase : Any = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: __lowerCamelCase : str = self.encoder['\n'] del self.encoder["\n"] __lowerCamelCase : str = collections.OrderedDict(sorted(self.encoder.items() 
,key=lambda SCREAMING_SNAKE_CASE__: x[1])) with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." ' Please check that the vocabulary is not corrupted!') __lowerCamelCase : Any = token_index writer.write(token + '\n') index += 1 return (vocab_file,) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : List[int] = None): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__) if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
652
1
"""simple docstring""" from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig A = logging.get_logger(__name__) A = """T5Config""" class _UpperCamelCase ( lowerCamelCase__ ): """simple docstring""" snake_case_ = 'mt5' snake_case_ = MTaConfig class _UpperCamelCase ( lowerCamelCase__ ): """simple docstring""" snake_case_ = 'mt5' snake_case_ = MTaConfig class _UpperCamelCase ( lowerCamelCase__ ): """simple docstring""" snake_case_ = 'mt5' snake_case_ = MTaConfig
147
"""simple docstring""" from __future__ import annotations A = [] def UpperCamelCase_ ( lowerCamelCase : list[list[int]] , lowerCamelCase : int , lowerCamelCase : int ) -> bool: """simple docstring""" for i in range(len(lowerCamelCase ) ): if board[row][i] == 1: return False for i in range(len(lowerCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(lowerCamelCase , -1 , -1 ) , range(lowerCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(lowerCamelCase , -1 , -1 ) , range(lowerCamelCase , len(lowerCamelCase ) ) ): if board[i][j] == 1: return False return True def UpperCamelCase_ ( lowerCamelCase : list[list[int]] , lowerCamelCase : int ) -> bool: """simple docstring""" if row >= len(lowerCamelCase ): solution.append(lowerCamelCase ) printboard(lowerCamelCase ) print() return True for i in range(len(lowerCamelCase ) ): if is_safe(lowerCamelCase , lowerCamelCase , lowerCamelCase ): __magic_name__ : Tuple = 1 solve(lowerCamelCase , row + 1 ) __magic_name__ : Tuple = 0 return False def UpperCamelCase_ ( lowerCamelCase : list[list[int]] ) -> None: """simple docstring""" for i in range(len(lowerCamelCase ) ): for j in range(len(lowerCamelCase ) ): if board[i][j] == 1: print('''Q''' , end=''' ''' ) else: print('''.''' , end=''' ''' ) print() # n=int(input("The no. of queens")) A = 8 A = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print("""The total no. of solutions are :""", len(solution))
147
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _a = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase( lowerCamelCase ): def __init__( self , __a , __a) -> str: '''simple docstring''' super().__init__() self.register_modules(unet=__a , scheduler=__a) @torch.no_grad() def __call__( self , __a = 1 , __a = 1_00 , __a = None , __a = None , __a = True , ) -> Union[AudioPipelineOutput, Tuple]: '''simple docstring''' if audio_length_in_s is None: _UpperCamelCase = self.unet.config.sample_size / self.unet.config.sample_rate _UpperCamelCase = audio_length_in_s * self.unet.config.sample_rate _UpperCamelCase = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''') _UpperCamelCase = int(__a) if sample_size % down_scale_factor != 0: _UpperCamelCase = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' ''' process.''') _UpperCamelCase = int(__a) _UpperCamelCase = next(iter(self.unet.parameters())).dtype _UpperCamelCase = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(__a , __a) and len(__a) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(__a)}, but requested an effective batch''' F''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''') _UpperCamelCase = randn_tensor(__a , generator=__a , device=self.device , dtype=__a) # set step values self.scheduler.set_timesteps(__a , device=audio.device) _UpperCamelCase = self.scheduler.timesteps.to(__a) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output _UpperCamelCase = self.unet(__a , __a).sample # 2. compute previous image: x_t -> t_t-1 _UpperCamelCase = self.scheduler.step(__a , __a , __a).prev_sample _UpperCamelCase = audio.clamp(-1 , 1).float().cpu().numpy() _UpperCamelCase = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=__a)
19
import os
import posixpath
import shutil  # used by _prepare_split_single when a working_dir is set; was missing
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for :class:`Spark`."""

    # Optional explicit schema; features are inferred from the dataframe when None.
    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: Iterable,
):
    """Return a generator factory yielding ``(key, row_dict)`` examples.

    Partitions are visited in ``partition_order``; keys are
    ``"{partition_id}_{row_id}"`` so they are unique and deterministic for a
    given order.
    """
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Pull one partition at a time onto the driver.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable backed by a Spark dataframe; one shard per partition."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        """Return a copy that iterates the partitions in a shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        """Return the subset of partitions assigned to ``worker_id``."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    """DatasetBuilder that materializes a Spark dataframe as an HF dataset.

    Shard writing is distributed over the cluster with ``mapInArrow``; only
    per-task statistics are collected back to the driver.
    """

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        # semanticHash uniquely identifies the dataframe's logical plan, so the
        # cache is keyed by dataframe content rather than by object identity.
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """On a multi-node cluster, verify that workers can reach cache_dir."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        # A Spark dataframe has no predefined splits; everything goes to TRAIN.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition so each partition's Arrow payload stays under max_shard_size."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        """Write shards on the executors; yield per-task write statistics.

        Yields ``(task_id, (num_examples, num_bytes, num_shards, shard_lengths))``.
        """
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Roll over to a new shard once the size budget is hit.
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                # Move shards from the executor-local working dir to the final location.
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """Drive the distributed write, then rename shards to the final pattern."""
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
343
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    # Fallback so `slow_tokenizer_class = RemBertTokenizer` below still resolves
    # when sentencepiece is not installed (the mangled version assigned None to
    # an unrelated name, leaving RemBertTokenizer undefined).
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class UpperCAmelCase(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer backed by the HF `tokenizers` library.

    Previous revision had every method named ``snake_case__`` (each definition
    clobbered the one before) and ``__init__`` bound configuration to local
    variables instead of instance attributes; both are fixed, and the framework
    hook names (``build_inputs_with_special_tokens`` etc.) are restored so the
    base class actually dispatches to them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # NOTE(review): lstrip/rstrip flags were mangled in the original; these
        # follow the standard HF convention (mask token absorbs the preceding
        # space) — confirm against the released tokenizer config.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow (sentencepiece) tokenizer can only be re-saved when the
        # original sentencepiece model file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
710
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if the string form of *n* reads the same forwards and backwards."""
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1000000) -> int:
    """Project Euler 36: sum of all numbers below *limit* that are palindromic
    in both base 10 and base 2 (binary form taken without leading zeros).

    The previous revision defined both functions under the same mangled name
    and referenced undefined identifiers (`n`, `is_palindrome`, `solution`);
    the intended names are restored.
    """
    total = 0
    for i in range(1, limit):
        # bin(i) is "0b...", so keep only the digits after the prefix.
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
139
0
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: if exponent == 1: return base if exponent % 2 == 0: UpperCamelCase : List[Any] = _modexpt(_lowerCAmelCase , exponent // 2 , _lowerCAmelCase ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(_lowerCAmelCase , exponent - 1 , _lowerCAmelCase )) % modulo_value def A_ ( _lowerCAmelCase = 1777 , _lowerCAmelCase = 1855 , _lowerCAmelCase = 8 ) -> int: UpperCamelCase : Optional[Any] = base for _ in range(1 , _lowerCAmelCase ): UpperCamelCase : int = _modexpt(_lowerCAmelCase , _lowerCAmelCase , 10**digits ) return result if __name__ == "__main__": print(f"""{solution() = }""")
629
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class A__(SequenceFeatureExtractor):
    """Speech-to-text feature extractor: Kaldi-compliant log-mel filterbank
    features plus optional utterance-level cepstral mean/variance normalization.

    Fixes over the previous revision: the three helper methods all shared one
    mangled name (clobbering each other, so ``self._extract_fbank_features``
    etc. were undefined); ``__init__`` bound config to locals instead of
    ``self``; and the nonexistent dtypes ``np.floataa``/``np.intaa`` are
    replaced by ``np.float32``/``np.int32``.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # Downstream models need the mask to know which frames are padding.
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """Compute Kaldi-style log-mel filterbank features for one utterance."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Cepstral mean/variance normalization over the first ``input_length``
        (non-padding) frames; padded frames are reset to ``padding_value``."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        """Apply per-utterance CMVN, using the attention mask for true lengths."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one utterance or a batch of utterances.

        Args:
            raw_speech: mono waveform(s) as float array(s) or list(s) of floats.
            padding/max_length/truncation/pad_to_multiple_of: forwarded to
                :meth:`pad`.
            return_tensors: optional tensor type for the output batch.
            sampling_rate: sampling rate of ``raw_speech``; validated against
                the extractor's configured rate when given.

        Returns:
            ``BatchFeature`` with ``input_features`` and ``attention_mask``.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
629
1
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}


class snake_case__(PreTrainedTokenizerFast):
    """Fast RoBERTa tokenizer (byte-level BPE) backed by HF `tokenizers`.

    Previous revision named every method/property ``snake_case`` (each
    definition clobbered the last, and ``@mask_token.setter`` referenced a
    property name that no longer existed, raising at class creation). The
    framework hook names are restored so the base class dispatches correctly.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word: it absorbs the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend BPE model files (vocab + merges) to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """RoBERTa does not use token types: all zeros, sized to the full input."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
81
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = """deformable_detr""" lowercase_ = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , 
SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ): if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : List[Any] = backbone_config.get("model_type" ) lowercase__ : Any = CONFIG_MAPPING[backbone_model_type] lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE ) lowercase__ : int = use_timm_backbone lowercase__ : Optional[Any] = backbone_config lowercase__ : Union[str, Any] = num_channels lowercase__ : List[Any] = num_queries lowercase__ : List[Any] = max_position_embeddings lowercase__ : Union[str, Any] = d_model lowercase__ : Union[str, Any] = encoder_ffn_dim lowercase__ : Optional[Any] = encoder_layers lowercase__ : Optional[Any] = encoder_attention_heads lowercase__ : Optional[Any] = decoder_ffn_dim lowercase__ : List[Any] = decoder_layers lowercase__ : Optional[int] = decoder_attention_heads lowercase__ : str = dropout lowercase__ : Union[str, Any] = attention_dropout lowercase__ : List[str] = activation_dropout lowercase__ : Optional[Any] = activation_function lowercase__ : Optional[Any] = init_std lowercase__ : str = init_xavier_std lowercase__ : Any = encoder_layerdrop lowercase__ : int = auxiliary_loss lowercase__ : Dict = position_embedding_type lowercase__ : int = backbone lowercase__ : Optional[Any] = use_pretrained_backbone lowercase__ : List[Any] = dilation # deformable attributes lowercase__ : Dict = num_feature_levels 
lowercase__ : Optional[int] = encoder_n_points lowercase__ : Any = decoder_n_points lowercase__ : int = two_stage lowercase__ : int = two_stage_num_proposals lowercase__ : Union[str, Any] = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher lowercase__ : List[Any] = class_cost lowercase__ : Optional[int] = bbox_cost lowercase__ : Any = giou_cost # Loss coefficients lowercase__ : List[str] = mask_loss_coefficient lowercase__ : int = dice_loss_coefficient lowercase__ : Any = bbox_loss_coefficient lowercase__ : Any = giou_loss_coefficient lowercase__ : Optional[int] = eos_coefficient lowercase__ : int = focal_alpha lowercase__ : Dict = disable_custom_kernels super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def snake_case ( self : List[Any] ): return self.encoder_attention_heads @property def snake_case ( self : Union[str, Any] ): return self.d_model def snake_case ( self : str ): lowercase__ : List[str] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowercase__ : int = self.backbone_config.to_dict() lowercase__ : Union[str, Any] = self.__class__.model_type return output
81
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __snake_case: Dict = logging.get_logger(__name__) def _snake_case ( A_ : Dict ): """simple docstring""" if isinstance(A_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(A_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(A_ ): return [[videos]] raise ValueError(f'''Could not make batched video from {videos}''' ) class _UpperCAmelCase ( lowerCAmelCase__ ): """simple docstring""" a_ = ["pixel_values"] def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 2_55 , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): '''simple docstring''' super().__init__(**lowerCAmelCase_ ) a_ : Any = size if size is not None else {"""shortest_edge""": 2_24} a_ : Union[str, Any] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) a_ : List[str] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} a_ : Optional[Any] = get_size_dict(lowerCAmelCase_ , param_name="""crop_size""" ) a_ : Optional[int] = do_resize a_ : Optional[int] = size a_ : Optional[int] = do_center_crop a_ : Tuple = crop_size a_ : List[Any] = resample a_ : List[str] = do_rescale a_ : 
List[Any] = rescale_factor a_ : Optional[Any] = do_normalize a_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a_ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): '''simple docstring''' a_ : List[str] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) if "shortest_edge" in size: a_ : Tuple = get_resize_output_image_size(lowerCAmelCase_ , size["""shortest_edge"""] , default_to_square=lowerCAmelCase_ ) elif "height" in size and "width" in size: a_ : List[str] = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): '''simple docstring''' a_ : Union[str, Any] = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(lowerCAmelCase_ , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): '''simple docstring''' return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): '''simple docstring''' return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
a_ : str = to_numpy_array(lowerCAmelCase_ ) if do_resize: a_ : str = self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) if do_center_crop: a_ : str = self.center_crop(lowerCAmelCase_ , size=lowerCAmelCase_ ) if do_rescale: a_ : Tuple = self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) if do_normalize: a_ : List[Any] = self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) a_ : Optional[Any] = to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) return image def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ): '''simple docstring''' a_ : List[str] = do_resize if do_resize is not None else self.do_resize a_ : Dict = resample if resample is not None else self.resample a_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop a_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale a_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor a_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize a_ : str = image_mean if image_mean is not None else self.image_mean a_ : int = image_std if image_std is not None else self.image_std a_ : str = size if size is not None else self.size a_ : Optional[Any] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) a_ : Dict = crop_size if crop_size is not None else self.crop_size a_ : List[str] = get_size_dict(lowerCAmelCase_ , param_name="""crop_size""" ) if not valid_images(lowerCAmelCase_ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) a_ : List[Any] = make_batched(lowerCAmelCase_ ) a_ : Any = [ [ self._preprocess_image( image=lowerCAmelCase_ , do_resize=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , do_center_crop=lowerCAmelCase_ , crop_size=lowerCAmelCase_ , do_rescale=lowerCAmelCase_ , rescale_factor=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ , image_mean=lowerCAmelCase_ , image_std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , ) for img in video ] for video in videos ] a_ : Union[str, Any] = {"""pixel_values""": videos} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
577
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _snake_case ( A_ : List[str] ): """simple docstring""" a_ , a_ : Optional[int] = image.size a_ , a_ : Optional[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 a_ : List[Any] = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) a_ : Tuple = np.array(A_ ).astype(np.floataa ) / 255.0 a_ : Tuple = image[None].transpose(0 , 3 , 1 , 2 ) a_ : Tuple = torch.from_numpy(A_ ) return 2.0 * image - 1.0 class _UpperCAmelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ): '''simple docstring''' super().__init__() self.register_modules(vqvae=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) @torch.no_grad() def __call__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1_00 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , ): '''simple docstring''' if isinstance(lowerCAmelCase_ , PIL.Image.Image ): a_ : str = 1 elif isinstance(lowerCAmelCase_ , torch.Tensor ): a_ : Tuple = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCAmelCase_ )}''' ) if isinstance(lowerCAmelCase_ , PIL.Image.Image ): a_ : Any = preprocess(lowerCAmelCase_ ) a_ , a_ : Dict = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image a_ : Optional[int] = (batch_size, self.unet.config.in_channels // 2, height, width) a_ : 
str = next(self.unet.parameters() ).dtype a_ : Dict = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ ) a_ : Optional[Any] = image.to(device=self.device , dtype=lowerCAmelCase_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(lowerCAmelCase_ , device=self.device ) a_ : Dict = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler a_ : Tuple = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a_ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) a_ : int = {} if accepts_eta: a_ : Union[str, Any] = eta for t in self.progress_bar(lowerCAmelCase_ ): # concat latents and low resolution image in the channel dimension. a_ : int = torch.cat([latents, image] , dim=1 ) a_ : Dict = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) # predict the noise residual a_ : Optional[Any] = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample # compute the previous noisy sample x_t -> x_t-1 a_ : Tuple = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample # decode the image latents with the VQVAE a_ : List[str] = self.vqvae.decode(lowerCAmelCase_ ).sample a_ : Tuple = torch.clamp(lowerCAmelCase_ , -1.0 , 1.0 ) a_ : Optional[int] = image / 2 + 0.5 a_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a_ : str = self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase_ )
577
1
from __future__ import annotations def a__ ( lowercase__ ): '''simple docstring''' if not nums: raise ValueError("List is empty" ) return sum(__lowerCAmelCase ) / len(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
712
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase : Union[str, Any] =logging.get_logger(__name__) __lowercase : List[Any] ={ """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class A ( __lowercase ): _snake_case ='''gptsan-japanese''' _snake_case =[ '''past_key_values''', ] _snake_case ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self: Optional[Any] , _lowerCAmelCase: List[Any]=3_6000 , _lowerCAmelCase: List[Any]=1280 , _lowerCAmelCase: str=1024 , _lowerCAmelCase: Any=8192 , _lowerCAmelCase: str=4096 , _lowerCAmelCase: int=128 , _lowerCAmelCase: int=10 , _lowerCAmelCase: Dict=0 , _lowerCAmelCase: Any=16 , _lowerCAmelCase: Optional[int]=16 , _lowerCAmelCase: List[Any]=128 , _lowerCAmelCase: Tuple=0.0 , _lowerCAmelCase: Optional[Any]=1e-5 , _lowerCAmelCase: int=False , _lowerCAmelCase: Optional[Any]=0.0 , _lowerCAmelCase: str="float32" , _lowerCAmelCase: Dict=False , _lowerCAmelCase: Any=False , _lowerCAmelCase: int=False , _lowerCAmelCase: Union[str, Any]=0.0_02 , _lowerCAmelCase: Optional[int]=False , _lowerCAmelCase: int=True , _lowerCAmelCase: List[str]=3_5998 , _lowerCAmelCase: Optional[int]=3_5995 , _lowerCAmelCase: Dict=3_5999 , **_lowerCAmelCase: Optional[Any] , ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ =vocab_size UpperCAmelCase_ =max_position_embeddings UpperCAmelCase_ =d_model UpperCAmelCase_ =d_ff UpperCAmelCase_ =d_ext UpperCAmelCase_ =d_spout UpperCAmelCase_ =num_switch_layers UpperCAmelCase_ =num_ext_layers UpperCAmelCase_ =num_switch_layers + num_ext_layers UpperCAmelCase_ =num_heads UpperCAmelCase_ =num_experts UpperCAmelCase_ =expert_capacity UpperCAmelCase_ =dropout_rate UpperCAmelCase_ =layer_norm_epsilon UpperCAmelCase_ =router_bias UpperCAmelCase_ =router_jitter_noise UpperCAmelCase_ =router_dtype 
UpperCAmelCase_ =router_ignore_padding_tokens UpperCAmelCase_ =output_hidden_states UpperCAmelCase_ =output_attentions UpperCAmelCase_ =initializer_factor UpperCAmelCase_ =output_router_logits UpperCAmelCase_ =use_cache super().__init__( separator_token_id=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
550
0
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( "stable diffusion controlnet", "0.22.0", "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.", standard_warn=False, stacklevel=3, )
10
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _A = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) _A = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: SCREAMING_SNAKE_CASE__ = SavedModel() SCREAMING_SNAKE_CASE__ = [] with open(os.path.join(__UpperCAmelCase , "utils" , "tf_ops" , "onnx.json" ) ) as f: SCREAMING_SNAKE_CASE__ = json.load(__UpperCAmelCase )["opsets"] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(__UpperCAmelCase )] ) with open(__UpperCAmelCase , "rb" ) as f: saved_model.ParseFromString(f.read() ) SCREAMING_SNAKE_CASE__ = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want SCREAMING_SNAKE_CASE__ = sorted(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__UpperCAmelCase ) if strict and len(__UpperCAmelCase ) > 0: raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops ) elif len(__UpperCAmelCase ) > 0: print(F"""Found the following 
incompatible ops for the opset {opset}:""" ) print(*__UpperCAmelCase , sep="\n" ) else: print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) _A = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
159
0
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class UpperCamelCase__ : """simple docstring""" UpperCAmelCase__ = 42 # [batch_size x 3] UpperCAmelCase__ = 42 # [batch_size x 3] UpperCAmelCase__ = 42 # [batch_size x 3] UpperCAmelCase__ = 42 # [batch_size x 3] UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 def snake_case ( self : Tuple ): """simple docstring""" assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def snake_case ( self : int ): """simple docstring""" return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def snake_case ( self : int ): """simple docstring""" return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def snake_case ( self : Optional[Any] ): """simple docstring""" _lowercase = torch.arange(self.height * self.width ) _lowercase = torch.stack( [ pixel_indices % self.width, torch.div(__A , self.width , rounding_mode="trunc" ), ] , axis=1 , ) return coords @property def snake_case ( self : Optional[Any] ): """simple docstring""" _lowercase , *_lowercase = self.shape _lowercase = int(np.prod(__A ) ) _lowercase = self.get_image_coords() _lowercase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) _lowercase = self.get_camera_rays(__A ) _lowercase = rays.view(__A , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def snake_case ( self : Dict , __A : torch.Tensor ): """simple docstring""" _lowercase , *_lowercase , _lowercase = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] _lowercase = coords.view(__A , -1 , 2 ) _lowercase = self.resolution() 
_lowercase = self.fov() _lowercase = (flat.float() / (res - 1)) * 2 - 1 _lowercase = fracs * torch.tan(fov / 2 ) _lowercase = fracs.view(__A , -1 , 2 ) _lowercase = ( self.z.view(__A , 1 , 3 ) + self.x.view(__A , 1 , 3 ) * fracs[:, :, :1] + self.y.view(__A , 1 , 3 ) * fracs[:, :, 1:] ) _lowercase = directions / directions.norm(dim=-1 , keepdim=__A ) _lowercase = torch.stack( [ torch.broadcast_to(self.origin.view(__A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(__A , *__A , 2 , 3 ) def snake_case ( self : Tuple , __A : int , __A : int ): """simple docstring""" assert width * self.height == height * self.width, "The aspect ratio should not change." return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=__A , height=__A , x_fov=self.x_fov , y_fov=self.y_fov , ) def A__ ( A_ ) -> DifferentiableProjectiveCamera: _lowercase = [] _lowercase = [] _lowercase = [] _lowercase = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): _lowercase = np.array([np.sin(A_ ), np.cos(A_ ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) _lowercase = -z * 4 _lowercase = np.array([np.cos(A_ ), -np.sin(A_ ), 0.0] ) _lowercase = np.cross(A_ , A_ ) origins.append(A_ ) xs.append(A_ ) ys.append(A_ ) zs.append(A_ ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(A_ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A_ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A_ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A_ , axis=0 ) ).float() , width=A_ , height=A_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A_ )) , )
602
'''simple docstring''' from __future__ import annotations class UpperCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , __A : str=None ): """simple docstring""" _lowercase = data _lowercase = None def __repr__( self : Optional[int] ): """simple docstring""" _lowercase = [] _lowercase = self while temp: string_rep.append(f"""{temp.data}""" ) _lowercase = temp.next return "->".join(__A ) def A__ ( A_ ) -> Any: if not elements_list: raise Exception("The Elements List is empty" ) _lowercase = _lowercase = Node(elements_list[0] ) for i in range(1 , len(A_ ) ): _lowercase = Node(elements_list[i] ) _lowercase = current.next return head def A__ ( A_ ) -> None: if head_node is not None and isinstance(A_ , A_ ): print_reverse(head_node.next ) print(head_node.data ) def A__ ( ) -> Union[str, Any]: from doctest import testmod testmod() _lowercase = make_linked_list([14, 52, 14, 12, 43] ) print("Linked List:" ) print(A_ ) print("Elements in Reverse:" ) print_reverse(A_ ) if __name__ == "__main__": main()
602
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available lowerCAmelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
43
"""BARTpho tokenizer: SentencePiece model plus a reduced monolingual vocab.

Fix: the original obfuscated source reused one identifier for every parameter
of several methods (duplicate argument names are a SyntaxError in Python);
distinct parameter names are restored from the tokenizer's documented contract.
"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """Tokenizer for BARTpho, combining a SentencePiece model with a reduced
    fairseq-style monolingual vocabulary file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word: absorb the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab.
        # Keep order of special tokens for backward compatibility.
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # SentencePieceProcessor is not picklable: ship the serialized proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add BART special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARTpho does not use token types: return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token to its id in the reduced vocab; unknown tokens fall back to <unk>."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Join sentencepiece tokens back into text (▁ marks word boundaries)."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the sentencepiece model and the monolingual vocab into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
43
1
"""CodeGen model configuration and its ONNX export configuration.

Fixes: the obfuscated source gave every parameter of ``__init__`` /
``generate_dummy_inputs`` the same name (duplicate argument names are a
SyntaxError), and collapsed the ``batch, seqlen`` unpack of
``input_ids.shape`` into a single variable, breaking past-key-values dummy
input generation.
"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging

logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    """Configuration holding the hyperparameters of a CodeGen model."""

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for CodeGen (with optional past key values)."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (ids, mask, optional zeroed past_key_values) for export."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
714
"""Video classification pipeline (decord-based frame sampling).

Fixes: duplicate ``__SCREAMING_SNAKE_CASE`` parameter names (SyntaxError),
``np.intaa`` (no such NumPy dtype; linspace indices must be integer) and
``config.idalabel`` (the config attribute is ``id2label``).
"""
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_decord_available():
    import numpy as np
    from decord import VideoReader

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Assign labels to videos, sampling frames at a fixed rate."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        """Split user kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        """Classify the video(s) given as local path(s) or http(s) URL(s)."""
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        # Remote videos are fetched into memory first.
        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        # Evenly sample `num_frames` indices across the strided span.
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
462
0
"""ElGamal key generation: write a public/private key pair to text files.

Fixes: all four functions were named identically (shadowing each other), and
the obfuscation collapsed distinct variables — ``primitive_root`` tested
``pow(p, 2, p)`` instead of the random candidate ``g``, and ``generate_key``
overwrote one name where ``e_1`` (primitive root) and ``d`` (private exponent)
are both needed for ``e_2 = inverse(e_1 ** d mod p)``.
"""
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Return a (probable) primitive root modulo the prime ``p_val``."""
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        # Reject candidates whose order is trivially small.
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of ``key_size`` bits.

    Returns:
        ``((key_size, e_1, e_2, p), (key_size, d))`` — public and private keys.
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if they exist."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
429
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Donut-based document question answering tool.

Fixes: the seven class attributes were all assigned to one reused name (only
the last assignment survived), ``encode`` had two parameters with the same
name (SyntaxError), and ``tokenajson`` is a typo for the processor's
``token2json`` method.
"""
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool

if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build decoder prompt ids and pixel values from the document and question."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy-generate the answer sequence on the tool's device."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens / task tags and extract the 'answer' field."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
429
1
"""simple docstring""" from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run _lowerCamelCase : List[str] = True except (ImportError, AttributeError): _lowerCamelCase : int = object def lowercase_ ( *_UpperCAmelCase , **_UpperCAmelCase ): """simple docstring""" pass _lowerCamelCase : Optional[int] = False _lowerCamelCase : Optional[Any] = logging.get_logger('transformers-cli/serving') def lowercase_ ( _UpperCAmelCase ): """simple docstring""" A_ : int = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(_UpperCAmelCase , args.host , args.port , args.workers ) class lowercase ( __UpperCAmelCase): __lowerCAmelCase : dict class lowercase ( __UpperCAmelCase): __lowerCAmelCase : List[str] __lowerCAmelCase : Optional[List[int]] class lowercase ( __UpperCAmelCase): __lowerCAmelCase : str class lowercase ( __UpperCAmelCase): __lowerCAmelCase : Any class lowercase ( __UpperCAmelCase): @staticmethod def a_ ( _lowerCamelCase : ArgumentParser ): """simple docstring""" A_ : Optional[int] = parser.add_parser( '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' ) serve_parser.add_argument( '''--task''' , type=_lowerCamelCase , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , ) serve_parser.add_argument('''--host''' , type=_lowerCamelCase , default='''localhost''' , help='''Interface the server will listen on.''' ) serve_parser.add_argument('''--port''' , type=_lowerCamelCase , default=88_88 , help='''Port the serving will listen to.''' ) 
serve_parser.add_argument('''--workers''' , type=_lowerCamelCase , default=1 , help='''Number of http workers''' ) serve_parser.add_argument('''--model''' , type=_lowerCamelCase , help='''Model\'s name or path to stored model.''' ) serve_parser.add_argument('''--config''' , type=_lowerCamelCase , help='''Model\'s config name or path to stored model.''' ) serve_parser.add_argument('''--tokenizer''' , type=_lowerCamelCase , help='''Tokenizer name to use.''' ) serve_parser.add_argument( '''--device''' , type=_lowerCamelCase , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) serve_parser.set_defaults(func=_lowerCamelCase ) def __init__( self : Optional[Any] , _lowerCamelCase : Pipeline , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" A_ : Optional[int] = pipeline A_ : Optional[Any] = host A_ : List[Any] = port A_ : List[str] = workers if not _serve_dependencies_installed: raise RuntimeError( '''Using serve command requires FastAPI and uvicorn. 
''' '''Please install transformers with [serving]: pip install "transformers[serving]".''' '''Or install FastAPI and uvicorn separately.''' ) else: logger.info(F"""Serving model over {host}:{port}""" ) A_ : List[Any] = FastAPI( routes=[ APIRoute( '''/''' , self.model_info , response_model=_lowerCamelCase , response_class=_lowerCamelCase , methods=['''GET'''] , ), APIRoute( '''/tokenize''' , self.tokenize , response_model=_lowerCamelCase , response_class=_lowerCamelCase , methods=['''POST'''] , ), APIRoute( '''/detokenize''' , self.detokenize , response_model=_lowerCamelCase , response_class=_lowerCamelCase , methods=['''POST'''] , ), APIRoute( '''/forward''' , self.forward , response_model=_lowerCamelCase , response_class=_lowerCamelCase , methods=['''POST'''] , ), ] , timeout=6_00 , ) def a_ ( self : Union[str, Any] ): """simple docstring""" run(self._app , host=self.host , port=self.port , workers=self.workers ) def a_ ( self : Any ): """simple docstring""" return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def a_ ( self : Union[str, Any] , _lowerCamelCase : str = Body(_lowerCamelCase , embed=_lowerCamelCase ) , _lowerCamelCase : bool = Body(_lowerCamelCase , embed=_lowerCamelCase ) ): """simple docstring""" try: A_ : Optional[int] = self._pipeline.tokenizer.tokenize(_lowerCamelCase ) if return_ids: A_ : str = self._pipeline.tokenizer.convert_tokens_to_ids(_lowerCamelCase ) return ServeTokenizeResult(tokens=_lowerCamelCase , tokens_ids=_lowerCamelCase ) else: return ServeTokenizeResult(tokens=_lowerCamelCase ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(_lowerCamelCase )} ) def a_ ( self : Any , _lowerCamelCase : List[int] = Body(_lowerCamelCase , embed=_lowerCamelCase ) , _lowerCamelCase : bool = Body(_lowerCamelCase , embed=_lowerCamelCase ) , _lowerCamelCase : bool = Body(_lowerCamelCase , embed=_lowerCamelCase ) , ): """simple docstring""" try: A_ : List[Any] = 
self._pipeline.tokenizer.decode(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return ServeDeTokenizeResult(model='''''' , text=_lowerCamelCase ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(_lowerCamelCase )} ) async def a_ ( self : str , _lowerCamelCase : Optional[int]=Body(_lowerCamelCase , embed=_lowerCamelCase ) ): """simple docstring""" if len(_lowerCamelCase ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model A_ : Dict = self._pipeline(_lowerCamelCase ) return ServeForwardResult(output=_lowerCamelCase ) except Exception as e: raise HTTPException(5_00 , {'''error''': str(_lowerCamelCase )} )
361
"""Tests for the Transformers zero-shot-classification pipeline (PyTorch and TensorFlow backends)."""
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY

# These 2 model types require different inputs than those of the usual text models.
# NOTE(review): `Optional[Any]` is used without a `typing` import — presumably lost in a refactor; confirm.
_lowerCamelCase : Optional[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}


@is_pipeline_test
class lowercase ( unittest.TestCase):
    # NOTE(review): automated renaming has mangled this file — every method is `a_`, every local is
    # `A_` — so later `a_` definitions shadow earlier ones, and several references below
    # (`classifier`, `outputs`, `zero_shot_classifier`, `model_mapping`, `tf_model_mapping`,
    # `_TO_SKIP`, `original_labelaid`) no longer match any assignment. The original names must be
    # restored before this test suite can run; the comments describe the intended behavior.
    __lowerCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    __lowerCAmelCase : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # Filter out the model types listed in the module-level skip set (their inputs differ).
    if model_mapping is not None:
        __lowerCAmelCase : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        __lowerCAmelCase : List[Any] = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def a_ ( self : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Any ):
        """Build a ZeroShotClassificationPipeline plus example inputs for the shared pipeline tests."""
        A_ : int = ZeroShotClassificationPipeline(
            model=_lowerCamelCase , tokenizer=_lowerCamelCase , candidate_labels=['''polics''', '''health'''] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def a_ ( self : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ):
        """Exercise every supported way of passing candidate labels, plus the documented error cases."""
        A_ : List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )

        # No kwarg
        A_ : Tuple = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )

        A_ : List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )

        # Comma-separated string labels are split by the pipeline.
        A_ : Union[str, Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
        self.assertEqual(
            _lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
        # Scores form a distribution (multi_label is off by default), so they must sum to 1.
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )

        A_ : Tuple = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
        self.assertEqual(
            _lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )

        A_ : List[str] = classifier(
            '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )

        # Single-element and multi-element batches must both return one dict per sequence.
        # https://github.com/huggingface/transformers/issues/13846
        A_ : str = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            _lowerCamelCase , [
                {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
                for i in range(1 )
            ] , )
        A_ : str = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            _lowerCamelCase , [
                {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
                for i in range(2 )
            ] , )

        # Invalid inputs: empty sequence, None sequence, empty/None labels, and a hypothesis
        # template with no "{}" placeholder must all raise.
        with self.assertRaises(_lowerCamelCase ):
            classifier('''''' , candidate_labels='''politics''' )
        with self.assertRaises(_lowerCamelCase ):
            classifier(_lowerCamelCase , candidate_labels='''politics''' )
        with self.assertRaises(_lowerCamelCase ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
        with self.assertRaises(_lowerCamelCase ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels=_lowerCamelCase )
        with self.assertRaises(_lowerCamelCase ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
        with self.assertRaises(_lowerCamelCase ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_lowerCamelCase , )

        self.run_entailment_id(_lowerCamelCase )

    def a_ ( self : Any , _lowerCamelCase : Pipeline ):
        """Check that the entailment label index is resolved correctly from various label2id layouts."""
        A_ : int = zero_shot_classifier.model.config
        A_ : Dict = config.labelaid
        A_ : Optional[int] = zero_shot_classifier.entailment_id

        # No recognizable "entailment"-style label -> falls back to -1.
        A_ : Optional[Any] = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )

        A_ : Union[str, Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        # Matching is case-insensitive and accepts the "entail" prefix.
        A_ : int = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        A_ : Optional[Any] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )

        # Restore the original mapping so the pipeline is unchanged for subsequent assertions.
        A_ : List[Any] = original_labelaid
        self.assertEqual(_lowerCamelCase , zero_shot_classifier.entailment_id )

    @require_torch
    def a_ ( self : List[Any] ):
        """Regression test: very long inputs must be truncated instead of crashing."""
        A_ : List[str] = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            '''Who are you voting for in 2020?''' * 1_00 , candidate_labels=['''politics''', '''public health''', '''science'''] )

    @require_torch
    def a_ ( self : Dict ):
        """Smoke-test a tiny PyTorch model; an untrained model yields (near-)uniform scores."""
        A_ : Optional[Any] = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        A_ : Optional[Any] = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )

        self.assertEqual(
            nested_simplify(_lowerCamelCase ) , {
                '''sequence''': '''Who are you voting for in 2020?''',
                '''labels''': ['''science''', '''public health''', '''politics'''],
                '''scores''': [0.333, 0.333, 0.333],
            } , )

    @require_tf
    def a_ ( self : Dict ):
        """Same tiny-model smoke test, TensorFlow backend."""
        A_ : List[str] = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
        A_ : List[str] = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )

        self.assertEqual(
            nested_simplify(_lowerCamelCase ) , {
                '''sequence''': '''Who are you voting for in 2020?''',
                '''labels''': ['''science''', '''public health''', '''politics'''],
                '''scores''': [0.333, 0.333, 0.333],
            } , )

    @slow
    @require_torch
    def a_ ( self : int ):
        """Full-size PyTorch model: pin the scores for a short input and a multi-label long input."""
        A_ : Union[str, Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
        A_ : str = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )

        self.assertEqual(
            nested_simplify(_lowerCamelCase ) , {
                '''sequence''': '''Who are you voting for in 2020?''',
                '''labels''': ['''politics''', '''public health''', '''science'''],
                '''scores''': [0.976, 0.015, 0.009],
            } , )
        A_ : str = zero_shot_classifier(
            '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
            ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
            ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
            ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
            ''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
            ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
            ''' English-to-German translation task, improving over the existing best results, including ensembles by'''
            ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
            ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
            ''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
            ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowerCamelCase , )
        self.assertEqual(
            nested_simplify(_lowerCamelCase ) , {
                '''sequence''': (
                    '''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
                    ''' networks in an encoder-decoder configuration. The best performing models also connect the'''
                    ''' encoder and decoder through an attention mechanism. We propose a new simple network'''
                    ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
                    ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
                    ''' superior in quality while being more parallelizable and requiring significantly less time to'''
                    ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
                    ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
                    ''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
                    ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
                    ''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
                    ''' other tasks by applying it successfully to English constituency parsing both with large and'''
                    ''' limited training data.'''
                ),
                '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
                '''scores''': [0.817, 0.713, 0.018, 0.018],
            } , )

    @slow
    @require_tf
    def a_ ( self : Union[str, Any] ):
        """Same full-size-model checks as above, TensorFlow backend."""
        A_ : Tuple = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
        A_ : str = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )

        self.assertEqual(
            nested_simplify(_lowerCamelCase ) , {
                '''sequence''': '''Who are you voting for in 2020?''',
                '''labels''': ['''politics''', '''public health''', '''science'''],
                '''scores''': [0.976, 0.015, 0.009],
            } , )
        A_ : Tuple = zero_shot_classifier(
            '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
            ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
            ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
            ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
            ''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
            ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
            ''' English-to-German translation task, improving over the existing best results, including ensembles by'''
            ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
            ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
            ''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
            ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowerCamelCase , )
        self.assertEqual(
            nested_simplify(_lowerCamelCase ) , {
                '''sequence''': (
                    '''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
                    ''' networks in an encoder-decoder configuration. The best performing models also connect the'''
                    ''' encoder and decoder through an attention mechanism. We propose a new simple network'''
                    ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
                    ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
                    ''' superior in quality while being more parallelizable and requiring significantly less time to'''
                    ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
                    ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
                    ''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
                    ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
                    ''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
                    ''' other tasks by applying it successfully to English constituency parsing both with large and'''
                    ''' limited training data.'''
                ),
                '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
                '''scores''': [0.817, 0.713, 0.018, 0.018],
            } , )
361
1
"""Tests for the Nyströmformer model: config/input builder, per-head unit tests, and slow integration checks."""
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class __A :
    # Model tester: builds tiny configs and random inputs for the unit tests below.
    # NOTE(review): automated renaming has mangled this file — all three classes are `__A`, every
    # method is `_snake_case` (later defs shadow earlier ones), every local is `__UpperCAmelCase`,
    # and `__init__` declares the same parameter name repeatedly (a SyntaxError as written).
    # The attribute assignments record the intended parameter names; restore them before running.
    def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , ):
        __UpperCAmelCase : Union[str, Any] = parent
        __UpperCAmelCase : Optional[Any] = batch_size
        __UpperCAmelCase : Optional[int] = seq_length
        __UpperCAmelCase : Any = is_training
        __UpperCAmelCase : int = use_input_mask
        __UpperCAmelCase : Tuple = use_token_type_ids
        __UpperCAmelCase : List[str] = use_labels
        __UpperCAmelCase : Any = vocab_size
        __UpperCAmelCase : Dict = hidden_size
        __UpperCAmelCase : Any = num_hidden_layers
        __UpperCAmelCase : Dict = num_attention_heads
        __UpperCAmelCase : List[Any] = intermediate_size
        __UpperCAmelCase : str = hidden_act
        __UpperCAmelCase : Tuple = hidden_dropout_prob
        __UpperCAmelCase : List[str] = attention_probs_dropout_prob
        __UpperCAmelCase : Dict = max_position_embeddings
        __UpperCAmelCase : str = type_vocab_size
        __UpperCAmelCase : Any = type_sequence_label_size
        __UpperCAmelCase : List[str] = initializer_range
        __UpperCAmelCase : Optional[Any] = num_labels
        __UpperCAmelCase : List[str] = num_choices
        __UpperCAmelCase : int = scope

    def _snake_case ( self ):
        """Create random ids/masks/labels plus a config for a tiny model."""
        __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __UpperCAmelCase : Union[str, Any] = None
        if self.use_input_mask:
            __UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )

        __UpperCAmelCase : int = None
        if self.use_token_type_ids:
            __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __UpperCAmelCase : Dict = None
        __UpperCAmelCase : Any = None
        __UpperCAmelCase : List[Any] = None
        if self.use_labels:
            __UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )

        __UpperCAmelCase : int = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _snake_case ( self ):
        """Build a NystromformerConfig from the tester's hyper-parameters."""
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Run the bare model (with and without masks) and check the hidden-state shape."""
        __UpperCAmelCase : str = NystromformerModel(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
        __UpperCAmelCase : List[str] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
        __UpperCAmelCase : List[str] = model(UpperCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Masked-LM head: logits must be (batch, seq, vocab)."""
        __UpperCAmelCase : Union[str, Any] = NystromformerForMaskedLM(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Question-answering head: start/end logits must be (batch, seq)."""
        __UpperCAmelCase : List[str] = NystromformerForQuestionAnswering(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        __UpperCAmelCase : Dict = model(
            UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Sequence-classification head: logits must be (batch, num_labels)."""
        __UpperCAmelCase : Union[str, Any] = self.num_labels
        __UpperCAmelCase : int = NystromformerForSequenceClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        __UpperCAmelCase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Token-classification head: logits must be (batch, seq, num_labels)."""
        __UpperCAmelCase : Tuple = self.num_labels
        __UpperCAmelCase : int = NystromformerForTokenClassification(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Multiple-choice head: inputs are tiled per choice; logits must be (batch, num_choices)."""
        __UpperCAmelCase : int = self.num_choices
        __UpperCAmelCase : Any = NystromformerForMultipleChoice(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        __UpperCAmelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __UpperCAmelCase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __UpperCAmelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __UpperCAmelCase : str = model(
            UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _snake_case ( self ):
        """Return (config, inputs_dict) in the format the common test mixin expects."""
        __UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        (
            __UpperCAmelCase
        ) : Union[str, Any] = config_and_inputs
        __UpperCAmelCase : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class __A (A_ , A_ , unittest.TestCase ):
    # NOTE(review): the two `A_` bases are renaming damage — presumably ModelTesterMixin and
    # PipelineTesterMixin from the imports above; likewise `NystromformerModelTester` in setUp
    # refers to the tester class mangled to `__A`. Confirm before running.
    snake_case :str = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    snake_case :Tuple = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    snake_case :Optional[int] = False
    snake_case :int = False

    def _snake_case ( self ):
        """Set up the model tester and the config tester."""
        __UpperCAmelCase : Union[str, Any] = NystromformerModelTester(self )
        __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )

    def _snake_case ( self ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def _snake_case ( self ):
        """Bare model forward-pass shape test."""
        __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase_ )

    def _snake_case ( self ):
        """Bare model test across every position-embedding type."""
        __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __UpperCAmelCase : int = type
            self.model_tester.create_and_check_model(*UpperCamelCase_ )

    def _snake_case ( self ):
        """Masked-LM head test."""
        __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )

    def _snake_case ( self ):
        """Multiple-choice head test."""
        __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ )

    def _snake_case ( self ):
        """Question-answering head test."""
        __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )

    def _snake_case ( self ):
        """Sequence-classification head test."""
        __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )

    def _snake_case ( self ):
        """Token-classification head test."""
        __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )

    @slow
    def _snake_case ( self ):
        """Loading a published checkpoint must succeed."""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase : Dict = NystromformerModel.from_pretrained(UpperCamelCase_ )
            self.assertIsNotNone(UpperCamelCase_ )


@require_torch
class __A (unittest.TestCase ):
    @slow
    def _snake_case ( self ):
        """Integration test: forward pass of the pretrained base model, pinned output slice."""
        __UpperCAmelCase : Union[str, Any] = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
        __UpperCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )

        with torch.no_grad():
            __UpperCAmelCase : List[Any] = model(UpperCamelCase_ )[0]

        __UpperCAmelCase : Any = torch.Size((1, 6, 7_68) )
        self.assertEqual(output.shape , UpperCamelCase_ )

        __UpperCAmelCase : List[str] = torch.tensor(
            [[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]] )

        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )

    @slow
    def _snake_case ( self ):
        """Integration test: filling "[MASK]" in the sample sentence should decode to "capital"."""
        __UpperCAmelCase : Optional[Any] = """the [MASK] of Belgium is Brussels"""

        __UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
        __UpperCAmelCase : List[Any] = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )

        __UpperCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors="pt" )

        with torch.no_grad():
            __UpperCAmelCase : Tuple = model(encoding.input_ids ).logits

        # Token position 2 is the [MASK] token in this sentence.
        __UpperCAmelCase : str = token_logits[:, 2, :].argmax(-1 )[0]

        self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , "capital" )
168
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )] return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )] def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ): """simple docstring""" _lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case ) _lowerCamelCase : float = sum(__snake_case ) return abs(__snake_case ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
88
0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __lowerCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") __lowerCAmelCase = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) __lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def __lowerCamelCase ( _lowerCAmelCase ) -> Dict: with open(_UpperCamelCase , "rb" ) as f: _UpperCAmelCase = Image.open(_UpperCamelCase ) return im.convert("RGB" ) @dataclass class __SCREAMING_SNAKE_CASE : __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={ """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).""" } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""}) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the 
validation data."""}) __SCREAMING_SNAKE_CASE : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""}) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def UpperCAmelCase__ ( self : Optional[Any] ): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." ) @dataclass class __SCREAMING_SNAKE_CASE : __SCREAMING_SNAKE_CASE : str = field( default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase)} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""}) __SCREAMING_SNAKE_CASE : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) __SCREAMING_SNAKE_CASE : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""}) __SCREAMING_SNAKE_CASE : bool = field( default=lowercase , metadata={ """help""": ( """Will use 
the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) __SCREAMING_SNAKE_CASE : bool = field( default=lowercase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: _UpperCAmelCase = torch.stack([example["pixel_values"] for example in examples] ) _UpperCAmelCase = torch.tensor([example["labels"] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def __lowerCamelCase ( ) -> List[Any]: _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_image_classification" , _UpperCamelCase , _UpperCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_UpperCamelCase ) transformers.utils.logging.set_verbosity(_UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. 
if data_args.dataset_name is not None: _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , ) else: _UpperCAmelCase = {} if data_args.train_dir is not None: _UpperCAmelCase = os.path.join(data_args.train_dir , "**" ) if data_args.validation_dir is not None: _UpperCAmelCase = os.path.join(data_args.validation_dir , "**" ) _UpperCAmelCase = load_dataset( "imagefolder" , data_files=_UpperCamelCase , cache_dir=model_args.cache_dir , task="image-classification" , ) # If we don't have a validation split, split off a percentage of train as validation. _UpperCAmelCase = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _UpperCamelCase ) and data_args.train_val_split > 0.0: _UpperCAmelCase = dataset["train"].train_test_split(data_args.train_val_split ) _UpperCAmelCase = split["train"] _UpperCAmelCase = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase = dataset["train"].features["labels"].names _UpperCAmelCase , _UpperCAmelCase = {}, {} for i, label in enumerate(_UpperCamelCase ): _UpperCAmelCase = str(_UpperCamelCase ) _UpperCAmelCase = label # Load the accuracy metric from the datasets package _UpperCAmelCase = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(_lowerCAmelCase ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: _UpperCAmelCase = image_processor.size["shortest_edge"] else: _UpperCAmelCase = (image_processor.size["height"], image_processor.size["width"]) _UpperCAmelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) _UpperCAmelCase = Compose( [ RandomResizedCrop(_UpperCamelCase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) _UpperCAmelCase = Compose( [ Resize(_UpperCamelCase ), CenterCrop(_UpperCamelCase ), ToTensor(), normalize, ] ) def train_transforms(_lowerCAmelCase ): _UpperCAmelCase = [ _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"] ] return example_batch def val_transforms(_lowerCAmelCase ): _UpperCAmelCase = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _UpperCAmelCase = ( dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(_UpperCamelCase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _UpperCAmelCase = ( dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(_UpperCamelCase ) # Initalize our trainer _UpperCAmelCase = Trainer( model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not 
None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCamelCase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics("eval" , _UpperCamelCase ) trainer.save_metrics("eval" , _UpperCamelCase ) # Write model card and (optionally) push to hub _UpperCAmelCase = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**_UpperCamelCase ) else: trainer.create_model_card(**_UpperCamelCase ) if __name__ == "__main__": main()
716
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict=3 , __UpperCamelCase : List[Any]=32 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Union[str, Any]=10 , __UpperCamelCase : Any=[10, 20, 30, 40] , __UpperCamelCase : Optional[int]=[1, 1, 2, 1] , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : int=True , __UpperCamelCase : Any="relu" , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Any=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = embeddings_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = depths _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_act _UpperCAmelCase = num_labels _UpperCAmelCase = scope _UpperCAmelCase = len(__UpperCamelCase ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def 
UpperCAmelCase__ ( self : Optional[int] ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : str ): _UpperCAmelCase = TFResNetModel(config=__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : int ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFResNetForImageClassification(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __SCREAMING_SNAKE_CASE : int = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : int = False __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = TFResNetModelTester(self ) _UpperCAmelCase = 
ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self : int ): return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def UpperCAmelCase__ ( self : Tuple ): pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def UpperCAmelCase__ ( self : str ): pass def UpperCAmelCase__ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Any ): def check_hidden_states_output(__UpperCamelCase : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = model_class(__UpperCamelCase ) _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCAmelCase = self.model_tester.num_stages self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 ) # ResNet's 
feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: _UpperCAmelCase = layer_type _UpperCAmelCase = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def UpperCAmelCase__ ( self : Tuple ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = TFResNetModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def __lowerCamelCase ( ) -> List[str]: _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @cached_property def UpperCAmelCase__ ( self : str ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="tf" ) # forward pass _UpperCAmelCase = model(**__UpperCamelCase ) # verify the logits _UpperCAmelCase = tf.TensorShape((1, 1_000) ) 
self.assertEqual(outputs.logits.shape , __UpperCamelCase ) _UpperCAmelCase = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCamelCase , atol=1e-4 ) )
129
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): the original chunk reused one mangled name (`__UpperCamelCase`)
# for the config dict, the device counts and the ClusterConfig, and passed the
# raw `_lowercase` placeholder where `True`, `str`, `description`, etc. belong,
# so the written config was never populated.  Bindings restored below.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location=default_json_config_file, use_xpu=False):
    """Create and save a basic cluster config at `save_location`.

    Detects the local accelerator (CUDA, then XPU if `use_xpu`, then NPU,
    else CPU) and writes a matching `ClusterConfig` JSON file.

    Returns the path written, or False if a config already exists there
    (existing files are never overridden).
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        # No accelerator found: single CPU process.
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    """Attach the `default` subcommand (and its flags) to `parser`."""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    """Entry point for `accelerate config default`: write the config and report."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
1
"""KL-regularized variational autoencoder (AutoencoderKL) with optional sliced
and tiled encoding/decoding for large inputs.

NOTE(review): in the original chunk every assignment target was mangled into a
single reused name (``lowercase__``), so submodules were never registered on
``self`` and later reads (``self.encoder``, ``self.use_tiling``, ...) could not
resolve; ``nn.Convad`` is a mangled ``nn.Conv2d``.  This rewrite restores the
bindings implied by the visible reads and call sites.
"""

from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of `AutoencoderKL.encode`: the encoded latent distribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder with KL loss: encodes images to latents and back."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=1,
        act_fn="silu",
        latent_channels=4,
        norm_num_groups=32,
        sample_size=32,
        scaling_factor=0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        # each down block halves resolution, hence the power-of-two divisor
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder/decoder submodules support gradient checkpointing.
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable (or disable) tiled encode/decode for very large inputs."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """Disable tiled encode/decode."""
        self.enable_tiling(False)

    def enable_slicing(self):
        """Process batched inputs one sample at a time to reduce peak memory."""
        self.use_slicing = True

    def disable_slicing(self):
        """Process batched inputs in a single pass again."""
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors in the model, keyed by their weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor):
        """Set the attention processor(s); a dict must cover every attention layer."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default `AttnProcessor`."""
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x, return_dict: bool = True) -> AutoencoderKLOutput:
        """Encode images `x` into a latent distribution (tiled/sliced if enabled)."""
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # Single-tile decode; `decode` adds the slicing wrapper.
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode latents `z` back to images (sliced per-sample if enabled)."""
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        """Linearly blend the bottom rows of `a` into the top rows of `b` (in place on `b`)."""
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        """Linearly blend the right columns of `a` into the left columns of `b` (in place on `b`)."""
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x, return_dict: bool = True) -> AutoencoderKLOutput:
        """Encode `x` tile by tile, blending tile borders to avoid seams."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode `z` tile by tile, blending tile borders to avoid seams."""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator=None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Encode `sample`, draw (or take the mode of) a latent, and decode it."""
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
397
0
"""ConvNeXT V2 model configuration.

NOTE(review): in the original chunk the logger and the pretrained-config map
were assigned to the same mangled name (so the logger was lost), and every
``self.`` prefix inside ``__init__`` was dropped, so the config stored nothing.
Bindings restored below; class/constant names follow the referenced imports.
"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for ConvNeXT V2 models.

    Defaults correspond to the "tiny" architecture
    (facebook/convnextv2-tiny-1k-224).
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # tiny-architecture defaults when not given explicitly
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # BackboneConfigMixin reads these private attributes for backbone use.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
720
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model UpperCAmelCase_ = '0.12' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCAmelCase_ ( __UpperCAmelCase: Dict , __UpperCAmelCase: Tuple , __UpperCAmelCase: Optional[Any]=None ) -> Union[str, Any]: if rng is None: UpperCamelCase__ : Tuple = random.Random() UpperCamelCase__ : List[str] = 1 for dim in shape: total_dims *= dim UpperCamelCase__ : Tuple = [] for _ in range(__UpperCAmelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) UpperCamelCase__ : Any = np.array(__UpperCAmelCase , dtype=jnp.intaa ).reshape(__UpperCAmelCase ) return output def lowerCAmelCase_ ( __UpperCAmelCase: Optional[int] , __UpperCAmelCase: Optional[int]=None ) -> Any: UpperCamelCase__ : Any = ids_tensor(__UpperCAmelCase , vocab_size=2 , rng=__UpperCAmelCase ) # make sure that at least one token is attended to for each batch UpperCamelCase__ : Tuple = 1 return attn_mask @require_flax class lowercase__ : '''simple docstring''' a : Optional[int] = None a : Union[str, Any] = () def UpperCamelCase__ ( self ) -> str: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 UpperCamelCase__ : str = 2 UpperCamelCase__ : Dict = inputs['''input_ids'''].shape[-1] // 2 UpperCamelCase__ : int = inputs['''input_ids'''][:max_batch_size, :sequence_length] UpperCamelCase__ : Tuple = jnp.ones_like(__magic_name__ ) UpperCamelCase__ : Any = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens UpperCamelCase__ : List[str] = 
input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` UpperCamelCase__ : Tuple = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def UpperCamelCase__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = self._get_input_ids_and_config() UpperCamelCase__ : Optional[int] = False UpperCamelCase__ : Optional[int] = max_length UpperCamelCase__ : Dict = 0 for model_class in self.all_generative_model_classes: UpperCamelCase__ : str = model_class(__magic_name__ ) UpperCamelCase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning UpperCamelCase__ : List[str] = getattr(__magic_name__, __magic_name__ ) UpperCamelCase__ : List[Any] = pt_model_class(__magic_name__ ).eval() UpperCamelCase__ : int = load_flax_weights_in_pytorch_model(__magic_name__, flax_model.params ) UpperCamelCase__ : Optional[int] = flax_model.generate(__magic_name__ ).sequences UpperCamelCase__ : Union[str, Any] = pt_model.generate(torch.tensor(__magic_name__, dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: UpperCamelCase__ : Optional[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist() ) def UpperCamelCase__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[Any] = self._get_input_ids_and_config() UpperCamelCase__ : Dict = False UpperCamelCase__ : List[str] = max_length for model_class in self.all_generative_model_classes: UpperCamelCase__ : Dict = model_class(__magic_name__ ) UpperCamelCase__ : List[Any] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1], __magic_name__ ) 
UpperCamelCase__ : int = jit(model.generate ) UpperCamelCase__ : Optional[Any] = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = self._get_input_ids_and_config() UpperCamelCase__ : Optional[int] = True UpperCamelCase__ : List[str] = max_length for model_class in self.all_generative_model_classes: UpperCamelCase__ : Dict = model_class(__magic_name__ ) UpperCamelCase__ : Dict = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1], __magic_name__ ) UpperCamelCase__ : Optional[Any] = jit(model.generate ) UpperCamelCase__ : Optional[int] = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self ) -> Dict: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = self._get_input_ids_and_config() UpperCamelCase__ : List[str] = False UpperCamelCase__ : Dict = max_length UpperCamelCase__ : str = 2 for model_class in self.all_generative_model_classes: UpperCamelCase__ : Optional[int] = model_class(__magic_name__ ) UpperCamelCase__ : List[Any] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1], __magic_name__ ) UpperCamelCase__ : str = jit(model.generate ) UpperCamelCase__ : int = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Any = self._get_input_ids_and_config() UpperCamelCase__ : Tuple = False UpperCamelCase__ : Union[str, Any] = max_length UpperCamelCase__ : Optional[Any] = 2 UpperCamelCase__ : 
List[Any] = 2 for model_class in self.all_generative_model_classes: UpperCamelCase__ : List[str] = model_class(__magic_name__ ) UpperCamelCase__ : List[str] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences ) def UpperCamelCase__ ( self ) -> Dict: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = self._get_input_ids_and_config() UpperCamelCase__ : List[str] = True UpperCamelCase__ : Any = max_length UpperCamelCase__ : Union[str, Any] = 0.8 UpperCamelCase__ : Dict = 10 UpperCamelCase__ : Any = 0.3 UpperCamelCase__ : str = 1 UpperCamelCase__ : Union[str, Any] = 8 UpperCamelCase__ : Union[str, Any] = 9 for model_class in self.all_generative_model_classes: UpperCamelCase__ : int = model_class(__magic_name__ ) UpperCamelCase__ : Union[str, Any] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1], __magic_name__ ) UpperCamelCase__ : Any = jit(model.generate ) UpperCamelCase__ : List[Any] = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = self._get_input_ids_and_config() UpperCamelCase__ : Any = max_length UpperCamelCase__ : str = 1 UpperCamelCase__ : Dict = 8 UpperCamelCase__ : List[Any] = 9 for model_class in self.all_generative_model_classes: UpperCamelCase__ : Union[str, Any] = model_class(__magic_name__ ) UpperCamelCase__ : Optional[int] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1], __magic_name__ ) UpperCamelCase__ : Dict = jit(model.generate ) UpperCamelCase__ : int = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def 
UpperCamelCase__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : int = self._get_input_ids_and_config() UpperCamelCase__ : Dict = max_length UpperCamelCase__ : List[Any] = 2 UpperCamelCase__ : Dict = 1 UpperCamelCase__ : int = 8 UpperCamelCase__ : List[Any] = 9 for model_class in self.all_generative_model_classes: UpperCamelCase__ : Union[str, Any] = model_class(__magic_name__ ) UpperCamelCase__ : Union[str, Any] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1], __magic_name__ ) UpperCamelCase__ : Optional[Any] = jit(model.generate ) UpperCamelCase__ : Any = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = self._get_input_ids_and_config() # pad attention mask on the left UpperCamelCase__ : Optional[int] = attention_mask.at[(0, 0)].set(0 ) UpperCamelCase__ : Tuple = False UpperCamelCase__ : str = max_length for model_class in self.all_generative_model_classes: UpperCamelCase__ : int = model_class(__magic_name__ ) UpperCamelCase__ : List[Any] = model.generate(__magic_name__, attention_mask=__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1], __magic_name__ ) UpperCamelCase__ : str = jit(model.generate ) UpperCamelCase__ : Optional[Any] = jit_generate(__magic_name__, attention_mask=__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[str] = self._get_input_ids_and_config() # pad attention mask on the left UpperCamelCase__ : Any = attention_mask.at[(0, 0)].set(0 ) UpperCamelCase__ : Any = True 
UpperCamelCase__ : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: UpperCamelCase__ : Union[str, Any] = model_class(__magic_name__ ) UpperCamelCase__ : Optional[Any] = model.generate(__magic_name__, attention_mask=__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1], __magic_name__ ) UpperCamelCase__ : Union[str, Any] = jit(model.generate ) UpperCamelCase__ : str = jit_generate(__magic_name__, attention_mask=__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def UpperCamelCase__ ( self ) -> int: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = self._get_input_ids_and_config() # pad attention mask on the left UpperCamelCase__ : Union[str, Any] = attention_mask.at[(0, 0)].set(0 ) UpperCamelCase__ : Optional[Any] = 2 UpperCamelCase__ : List[str] = max_length for model_class in self.all_generative_model_classes: UpperCamelCase__ : int = model_class(__magic_name__ ) UpperCamelCase__ : Any = model.generate(__magic_name__, attention_mask=__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1], __magic_name__ ) UpperCamelCase__ : Tuple = jit(model.generate ) UpperCamelCase__ : int = jit_generate(__magic_name__, attention_mask=__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) @require_flax class lowercase__ ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase__ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' ) UpperCamelCase__ : Any = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCamelCase__ : int = '''Hello world''' UpperCamelCase__ : Tuple = tokenizer(__magic_name__, return_tensors='''np''' ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with 
self.assertRaisesRegex(__magic_name__, '''do_samples''' ): model.generate(__magic_name__, do_samples=__magic_name__ ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__magic_name__, '''foo''' ): UpperCamelCase__ : Optional[Any] = {'''foo''': '''bar'''} model.generate(__magic_name__, **__magic_name__ )
369
0
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class lowercase ( unittest.TestCase ): def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=4 , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def a ( self ): snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , 
num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a ( self ): snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class lowercase ( a_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def a ( self ): snake_case_ = FlaxAlbertModelTester(self ) @slow def a ( self ): for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained('albert-base-v2' ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase_ ) @require_flax class lowercase ( unittest.TestCase ): @slow def a ( self ): snake_case_ = FlaxAlbertModel.from_pretrained('albert-base-v2' ) snake_case_ = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) snake_case_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) snake_case_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] snake_case_ = (1, 11, 768) self.assertEqual(output.shape , lowerCAmelCase_ ) snake_case_ = np.array( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) 
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) )
362
'''simple docstring''' from __future__ import annotations from functools import lru_cache from math import ceil UpperCAmelCase__ = 1_0_0 UpperCAmelCase__ = set(range(3, NUM_PRIMES, 2)) primes.add(2) UpperCAmelCase__ = 42 for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : int ): """simple docstring""" if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} __A= set() __A= 42 __A= 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : int = 5000 ): """simple docstring""" for number_to_partition in range(1,_SCREAMING_SNAKE_CASE ): if len(partition(_SCREAMING_SNAKE_CASE ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"""{solution() = }""")
186
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : Dict = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' for attribute in key.split(""".""" ): __magic_name__ = getattr(A_, A_ ) if weight_type is not None: __magic_name__ = getattr(A_, A_ ).shape else: __magic_name__ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __magic_name__ = value elif weight_type == "weight_g": __magic_name__ = value elif weight_type == "weight_v": __magic_name__ = value elif weight_type == "bias": __magic_name__ = value else: __magic_name__ = value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = [] __magic_name__ = fairseq_model.state_dict() __magic_name__ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __magic_name__ = False if "conv_layers" in name: load_conv_layer( A_, A_, A_, A_, hf_model.config.feat_extract_norm == """group""", ) __magic_name__ = True else: for key, mapped_key in MAPPING.items(): __magic_name__ = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned): __magic_name__ = True if "*" in mapped_key: __magic_name__ = name.split(A_ )[0].split(""".""" )[-2] __magic_name__ = mapped_key.replace("""*""", A_ ) if "weight_g" in name: __magic_name__ = """weight_g""" elif "weight_v" in name: __magic_name__ = """weight_v""" elif "weight" in name: __magic_name__ = """weight""" elif "bias" in name: __magic_name__ = """bias""" else: __magic_name__ = None set_recursively(A_, A_, A_, A_, A_ ) continue if not is_used: unused_weights.append(A_ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = full_name.split("""conv_layers.""" )[-1] __magic_name__ = name.split(""".""" ) __magic_name__ = int(items[0] ) __magic_name__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __magic_name__ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} 
has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __magic_name__ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) __magic_name__ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __magic_name__ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A_ ) @torch.no_grad() def a__ ( A_, A_, A_=None, A_=None, A_=True ): '''simple docstring''' if config_path is not None: __magic_name__ = HubertConfig.from_pretrained(A_ ) else: __magic_name__ = HubertConfig() if is_finetuned: if dict_path: __magic_name__ = Dictionary.load(A_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __magic_name__ = target_dict.pad_index __magic_name__ = target_dict.bos_index __magic_name__ = target_dict.eos_index __magic_name__ = len(target_dict.symbols ) __magic_name__ = os.path.join(A_, """vocab.json""" ) if not os.path.isdir(A_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(A_ ) ) return os.makedirs(A_, exist_ok=A_ ) with open(A_, """w""", encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices, A_ ) __magic_name__ = WavaVecaCTCTokenizer( A_, unk_token=target_dict.unk_word, 
pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=A_, ) __magic_name__ = True if config.feat_extract_norm == """layer""" else False __magic_name__ = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=A_, return_attention_mask=A_, ) __magic_name__ = WavaVecaProcessor(feature_extractor=A_, tokenizer=A_ ) processor.save_pretrained(A_ ) __magic_name__ = HubertForCTC(A_ ) else: __magic_name__ = HubertModel(A_ ) if is_finetuned: __magic_name__ , __magic_name__ , __magic_name__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __magic_name__ , __magic_name__ , __magic_name__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __magic_name__ = model[0].eval() recursively_load_weights(A_, A_, A_ ) hf_wavavec.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __lowerCAmelCase : Dict = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
711
import os import sys __lowerCAmelCase : Optional[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Union[str, Any] = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoConfig.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModel.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A_, **A_ )
76
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType _lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__) _lowerCAmelCase: Any = { 'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json', } class lowercase_ (lowercase__ ): snake_case ='layoutlmv3' def __init__( self , lowercase_=50265 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_=1024 , lowercase_=128 , lowercase_=128 , lowercase_=True , lowercase_=32 , lowercase_=128 , lowercase_=64 , lowercase_=256 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=224 , lowercase_=3 , lowercase_=16 , lowercase_=None , **lowercase_ , ) -> int: super().__init__( vocab_size=lowercase_ , hidden_size=lowercase_ , num_hidden_layers=lowercase_ , num_attention_heads=lowercase_ , intermediate_size=lowercase_ , hidden_act=lowercase_ , hidden_dropout_prob=lowercase_ , attention_probs_dropout_prob=lowercase_ , max_position_embeddings=lowercase_ , type_vocab_size=lowercase_ , initializer_range=lowercase_ , layer_norm_eps=lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ , ) a__ =max_ad_position_embeddings a__ =coordinate_size a__ =shape_size a__ =has_relative_attention_bias a__ =rel_pos_bins a__ =max_rel_pos a__ =has_spatial_attention_bias a__ =rel_ad_pos_bins a__ =max_rel_ad_pos a__ =text_embed a__ =visual_embed a__ =input_size a__ =num_channels a__ =patch_size a__ =classifier_dropout 
class lowercase_ (lowercase__ ): snake_case =version.parse('1.12' ) @property def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) else: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels'}), ]) @property def __UpperCamelCase ( self) -> float: return 1e-5 @property def __UpperCamelCase ( self) -> int: return 12 def __UpperCamelCase ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , lowercase_ = 3 , lowercase_ = 40 , lowercase_ = 40 , ) -> Mapping[str, Any]: setattr(processor.image_processor , 'apply_ocr' , lowercase_) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX a__ =compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a__ =processor.tokenizer.num_special_tokens_to_add(lowercase_) a__ =compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_) # Generate dummy inputs according to compute batch and sequence a__ =[[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size # Generate dummy bounding boxes a__ =[[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = 
compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) a__ =self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_) a__ =dict( processor( lowercase_ , text=lowercase_ , boxes=lowercase_ , return_tensors=lowercase_ , )) return inputs
20
from __future__ import annotations from typing import Any class lowercase_ : def __init__( self , lowercase_) -> None: a__ =num_of_nodes a__ =[] a__ ={} def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None: self.m_edges.append([u_node, v_node, weight]) def __UpperCamelCase ( self , lowercase_) -> int: if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node]) def __UpperCamelCase ( self , lowercase_) -> None: if self.m_component[u_node] != u_node: for k in self.m_component: a__ =self.find_component(lowercase_) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None: if component_size[u_node] <= component_size[v_node]: a__ =v_node component_size[v_node] += component_size[u_node] self.set_component(lowercase_) elif component_size[u_node] >= component_size[v_node]: a__ =self.find_component(lowercase_) component_size[u_node] += component_size[v_node] self.set_component(lowercase_) def __UpperCamelCase ( self) -> None: a__ =[] a__ =0 a__ =[-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes): self.m_component.update({node: node}) component_size.append(1) a__ =self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: a__ , a__ , a__ =edge a__ =self.m_component[u] a__ =self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): a__ =[u, v, w] for edge in minimum_weight_edge: if isinstance(lowercase_ , lowercase_): a__ , a__ , a__ =edge a__ =self.m_component[u] a__ =self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowercase_ , lowercase_ , lowercase_) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""") num_of_components -= 1 a__ =[-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""") def 
_lowercase( ): pass if __name__ == "__main__": import doctest doctest.testmod()
20
1
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class UpperCamelCase_(ProcessorMixin):
    """Composite processor wrapping an auto image processor and an auto
    tokenizer behind a single ``__call__`` / ``decode`` interface.

    NOTE(review): the original inherited from an undefined obfuscated name
    and defined three methods under one duplicated identifier; base class
    and method names are restored from the ProcessorMixin contract.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # Default active sub-processor, mirroring other composite processors.
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a BatchEncoding holding ``input_ids``/``attention_mask`` when
        text is given, ``pixel_values`` when images are given, or both when
        both are given.  Raises ValueError when neither is provided.
        """
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge the image features into the text encoding.
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Combined model inputs of tokenizer and image processor.
        return ["input_ids", "attention_mask", "pixel_values"]
177
import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int) ->Dict: '''simple docstring''' A__ = jnp.ones((batch_size, length)) / length return scores def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: '''simple docstring''' A__ = None A__ = 20 A__ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase__) # tweak scores to not be uniform anymore A__ = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch A__ = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch # compute softmax A__ = jax.nn.softmax(UpperCAmelCase__ , axis=-1) A__ = FlaxTemperatureLogitsWarper(temperature=0.5) A__ = FlaxTemperatureLogitsWarper(temperature=1.3) A__ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__) , axis=-1) A__ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__) , axis=-1) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3)) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3)) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min()) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max()) 
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min()) def SCREAMING_SNAKE_CASE ( self : str) ->int: '''simple docstring''' A__ = None A__ = 10 A__ = 2 # create ramp distribution A__ = np.broadcast_to(np.arange(UpperCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() A__ = ramp_logits[1:, : vocab_size // 2] + vocab_size A__ = FlaxTopKLogitsWarper(3) A__ = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False]) self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True]) # check special case A__ = 5 A__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3) A__ = np.broadcast_to(np.arange(UpperCAmelCase__)[None, :] , (batch_size, length)).copy() A__ = top_k_warp_safety_check(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2]) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: '''simple docstring''' A__ = None A__ = 10 A__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) A__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]])) A__ = FlaxTopPLogitsWarper(0.8) A__ = np.exp(top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 A__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]]) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) # check edge cases with negative and extreme logits A__ = np.broadcast_to(np.arange(UpperCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme A__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept A__ = 
FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0) A__ = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2]) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: '''simple docstring''' A__ = 20 A__ = 4 A__ = 0 A__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__) # check that min length is applied at length 5 A__ = ids_tensor((batch_size, 20) , vocab_size=20) A__ = 5 A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__) A__ = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''')]) # check that min length is not applied anymore at length 15 A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__) A__ = 15 A__ = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) self.assertFalse(jnp.isinf(UpperCAmelCase__).any()) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' A__ = 20 A__ = 4 A__ = 0 A__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__) # check that all scores are -inf except the bos_token_id score A__ = ids_tensor((batch_size, 1) , vocab_size=20) A__ = 1 A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__) A__ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all()) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 A__ = 3 A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__) A__ = logits_processor(UpperCAmelCase__ , 
UpperCAmelCase__ , cur_len=UpperCAmelCase__) self.assertFalse(jnp.isinf(UpperCAmelCase__).any()) def SCREAMING_SNAKE_CASE ( self : Tuple) ->str: '''simple docstring''' A__ = 20 A__ = 4 A__ = 0 A__ = 5 A__ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__) # check that all scores are -inf except the eos_token_id when max_length is reached A__ = ids_tensor((batch_size, 4) , vocab_size=20) A__ = 4 A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__) A__ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all()) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached A__ = 3 A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__) A__ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) self.assertFalse(jnp.isinf(UpperCAmelCase__).any()) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = 4 A__ = 10 A__ = 15 A__ = 2 A__ = 1 A__ = 15 # dummy input_ids and scores A__ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__) A__ = input_ids.copy() A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__) A__ = scores.copy() # instantiate all dist processors A__ = FlaxTemperatureLogitsWarper(temperature=0.5) A__ = FlaxTopKLogitsWarper(3) A__ = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors A__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__) A__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__) A__ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__) A__ = 10 # no processor list A__ = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) A__ = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , 
cur_len=UpperCAmelCase__) A__ = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) A__ = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) A__ = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) A__ = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) # with processor list A__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]) A__ = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) # scores should be equal self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist()) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' A__ = 4 A__ = 10 A__ = 15 A__ = 2 A__ = 1 A__ = 15 # dummy input_ids and scores A__ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__) A__ = input_ids.copy() A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__) A__ = scores.copy() # instantiate all dist processors A__ = FlaxTemperatureLogitsWarper(temperature=0.5) A__ = FlaxTopKLogitsWarper(3) A__ = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors A__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__) A__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__) A__ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__) A__ = 10 # no processor list def run_no_processor_list(UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any]): A__ = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) A__ = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) A__ = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) A__ = min_dist_proc(UpperCAmelCase__ 
, UpperCAmelCase__ , cur_len=UpperCAmelCase__) A__ = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) A__ = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) return scores # with processor list def run_processor_list(UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any]): A__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]) A__ = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__) return scores A__ = jax.jit(UpperCAmelCase__) A__ = jax.jit(UpperCAmelCase__) A__ = jitted_run_no_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) A__ = jitted_run_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # scores should be equal self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
177
1
"""simple docstring""" from __future__ import annotations def lowercase_ ( _lowercase : list[int] , _lowercase : list[int] , _lowercase : list[int] , _lowercase : list[list[str]] , _lowercase : int , ): '''simple docstring''' UpperCAmelCase : Optional[int] = len(_lowercase ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(_lowercase ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowercase , _lowercase , ) def lowercase_ ( _lowercase : int ): '''simple docstring''' UpperCAmelCase : list[list[str]] = [] depth_first_search([] , [] , [] , _lowercase , _lowercase ) # Print all the boards for board in boards: for column in board: print(_lowercase ) print("" ) print(len(_lowercase ) , "solutions were found." 
) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
595
"""simple docstring""" def lowercase_ ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] UpperCAmelCase : int = 6 UpperCAmelCase : Tuple = 1 UpperCAmelCase : List[str] = 19_01 UpperCAmelCase : Tuple = 0 while year < 20_01: day += 7 if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 UpperCAmelCase : Tuple = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 UpperCAmelCase : Optional[Any] = day - 29 else: if day > days_per_month[month - 1]: month += 1 UpperCAmelCase : Union[str, Any] = day - days_per_month[month - 2] if month > 12: year += 1 UpperCAmelCase : Union[str, Any] = 1 if year < 20_01 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
595
1
import math


class Graph:
    """Dense graph on nodes 0 .. n-1 with Floyd-Warshall all-pairs shortest
    paths.  (Renamed from the obfuscated original: the module-level code
    below instantiates ``Graph``.)
    """

    def __init__(self, n=0):
        # a graph with nodes 0, 1, ..., n-1
        self.n = n
        # adjacency matrix for weight; math.inf means "no direct edge"
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores the minimum known distance from i to j.
        # NOTE(review): the diagonal dp[i][i] is never initialised to 0, so
        # show_min(i, i) reports inf — preserved from the original code.
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every (i, j) pair through every intermediate node k."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest known distance from u to v."""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
707
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[Any] = AudioLDMPipeline __magic_name__ :Union[str, Any] = TEXT_TO_AUDIO_PARAMS __magic_name__ :Tuple = TEXT_TO_AUDIO_BATCH_PARAMS __magic_name__ :Dict = frozenset( [ """num_inference_steps""", """num_waveforms_per_prompt""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) def snake_case ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ :Union[str, Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=__UpperCAmelCase , ) lowerCAmelCase__ :Tuple = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) lowerCAmelCase__ :Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , 
up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase__ :Dict = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , ) lowerCAmelCase__ :int = ClapTextModelWithProjection(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 ) lowerCAmelCase__ :Dict = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__UpperCAmelCase , ) lowerCAmelCase__ :str = SpeechTaHifiGan(__UpperCAmelCase ) lowerCAmelCase__ :str = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('mps' ): lowerCAmelCase__ :Tuple = torch.manual_seed(__UpperCAmelCase ) else: lowerCAmelCase__ :Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Optional[int] = self.get_dummy_components() lowerCAmelCase__ :List[str] = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Any = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :int = 
self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :int = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 2_5_6 lowerCAmelCase__ :int = audio[:1_0] lowerCAmelCase__ :Optional[Any] = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.get_dummy_components() lowerCAmelCase__ :int = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = audioldm_pipe.to(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = 3 * [inputs['prompt']] # forward lowerCAmelCase__ :Dict = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = output.audios[0] lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = 3 * [inputs.pop('prompt' )] lowerCAmelCase__ :Union[str, Any] = audioldm_pipe.tokenizer( __UpperCAmelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='pt' , ) lowerCAmelCase__ :Union[str, Any] = text_inputs['input_ids'].to(__UpperCAmelCase ) lowerCAmelCase__ :Dict = audioldm_pipe.text_encoder( __UpperCAmelCase , ) lowerCAmelCase__ :Any = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state lowerCAmelCase__ :List[Any] = F.normalize(__UpperCAmelCase , dim=-1 ) lowerCAmelCase__ :int = prompt_embeds # forward lowerCAmelCase__ :str = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' 
lowerCAmelCase__ :Optional[int] = self.get_dummy_components() lowerCAmelCase__ :str = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = audioldm_pipe.to(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = 3 * ['this is a negative prompt'] lowerCAmelCase__ :str = negative_prompt lowerCAmelCase__ :List[Any] = 3 * [inputs['prompt']] # forward lowerCAmelCase__ :Any = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = output.audios[0] lowerCAmelCase__ :List[str] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = 3 * [inputs.pop('prompt' )] lowerCAmelCase__ :str = [] for p in [prompt, negative_prompt]: lowerCAmelCase__ :Optional[Any] = audioldm_pipe.tokenizer( __UpperCAmelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='pt' , ) lowerCAmelCase__ :List[Any] = text_inputs['input_ids'].to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = audioldm_pipe.text_encoder( __UpperCAmelCase , ) lowerCAmelCase__ :Tuple = text_embeds.text_embeds # additional L_2 normalization over each hidden-state lowerCAmelCase__ :Dict = F.normalize(__UpperCAmelCase , dim=-1 ) embeds.append(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ :Tuple = embeds # forward lowerCAmelCase__ :Dict = audioldm_pipe(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Union[str, Any] = self.get_dummy_components() lowerCAmelCase__ :Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = 
AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :str = 'egg cracking' lowerCAmelCase__ :Optional[int] = audioldm_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 2_5_6 lowerCAmelCase__ :List[Any] = audio[:1_0] lowerCAmelCase__ :Any = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Tuple = self.get_dummy_components() lowerCAmelCase__ :Optional[int] = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Tuple = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) lowerCAmelCase__ :Tuple = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 ).audios assert audios.shape == (1, 2_5_6) # test num_waveforms_per_prompt=1 (default) for batch of prompts lowerCAmelCase__ :str = 2 lowerCAmelCase__ :Dict = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_5_6) # test num_waveforms_per_prompt for single prompt lowerCAmelCase__ :Any = 2 lowerCAmelCase__ :Union[str, Any] = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios assert audios.shape == (num_waveforms_per_prompt, 2_5_6) # test num_waveforms_per_prompt for batch of prompts 
lowerCAmelCase__ :List[str] = 2 lowerCAmelCase__ :List[str] = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ :Dict = self.get_dummy_components() lowerCAmelCase__ :Dict = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :str = audioldm_pipe.vocoder.config.sampling_rate lowerCAmelCase__ :Tuple = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe(audio_length_in_s=0.0_16 , **__UpperCAmelCase ) lowerCAmelCase__ :int = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.0_16 lowerCAmelCase__ :List[Any] = audioldm_pipe(audio_length_in_s=0.0_32 , **__UpperCAmelCase ) lowerCAmelCase__ :str = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.0_32 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.get_dummy_components() lowerCAmelCase__ :Optional[int] = AudioLDMPipeline(**__UpperCAmelCase ) lowerCAmelCase__ :str = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = ['hey'] lowerCAmelCase__ :Any = audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 ) lowerCAmelCase__ :List[Any] = output.audios.shape assert audio_shape == (1, 2_5_6) lowerCAmelCase__ :List[Any] = audioldm_pipe.vocoder.config config.model_in_dim *= 2 lowerCAmelCase__ :Tuple = SpeechTaHifiGan(__UpperCAmelCase ).to(__UpperCAmelCase ) lowerCAmelCase__ :Any = audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 ) 
lowerCAmelCase__ :Any = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_5_6) def snake_case ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=__UpperCAmelCase ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def snake_case ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase ) @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ): '''simple docstring''' lowerCAmelCase__ :str = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 8, 1_2_8, 1_6) ) lowerCAmelCase__ :Any = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) lowerCAmelCase__ :List[str] = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) lowerCAmelCase__ :Optional[Any] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = self.get_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Dict = 2_5 lowerCAmelCase__ :List[Any] = 
audioldm_pipe(**__UpperCAmelCase ).audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 8_1_9_2_0 lowerCAmelCase__ :Optional[Any] = audio[7_7_2_3_0:7_7_2_4_0] lowerCAmelCase__ :Dict = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) lowerCAmelCase__ :int = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) lowerCAmelCase__ :int = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = self.get_inputs(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = audioldm_pipe(**__UpperCAmelCase ).audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 8_1_9_2_0 lowerCAmelCase__ :Tuple = audio[2_7_7_8_0:2_7_7_9_0] lowerCAmelCase__ :Union[str, Any] = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] ) lowerCAmelCase__ :Any = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
560
0
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    """A node in a disjoint-set forest: holds the data, a parent link and a rank."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self  # every node starts as the root of its own set
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) structure with path compression and union by rank."""

    def __init__(self) -> None:
        # map from node data to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set whose only member is *data*."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        """Return the representative of the set containing *data* (with path compression)."""
        elem_ref = self.map[data]
        if elem_ref is not elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(
        self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]
    ) -> None:
        """Attach the lower-rank root under the higher-rank root (helper for union)."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        """Merge the two disjoint sets containing *data1* and *data2*."""
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph supporting Kruskal's minimum-spanning-tree."""

    def __init__(self) -> None:
        # connections: map from a node to its neighbours (with edge weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        """Add *node* to the graph if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge of the given *weight*, creating nodes as needed."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        """Return the minimum spanning tree of this graph as a new graph.

        NOTE(review): like the original, this assumes the graph is connected;
        a disconnected graph would exhaust the edge list and raise IndexError.
        """
        # Collect each undirected edge exactly once.
        edges: list[tuple[T, T, int]] = []
        seen: set[tuple[T, T]] = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))  # skip the reverse direction later
                    edges.append((start, end, self.connections[start][end]))
        # Original had `lambda __lowercase: x[2]`, which referenced an
        # undefined name; sort edges by weight.
        edges.sort(key=lambda edge: edge[2])

        # One disjoint set per node for cycle detection.
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # Greedily take the lightest edge that joins two different components.
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
63
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor a_ = logging.get_logger(__name__) class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , *__lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' warnings.warn( '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PoolFormerImageProcessor instead.''' , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase )
177
0
'''simple docstring''' import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def __UpperCAmelCase ( ) -> List[Any]: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(A ): requests.request('''GET''' , '''https://huggingface.co''' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 ) @pytest.mark.integration def __UpperCAmelCase ( ) -> str: with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('''GET''' , '''https://huggingface.co''' ) def __UpperCAmelCase ( ) -> Dict: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(A ): http_head('''https://huggingface.co''' )
216
"""Bilateral filtering of a grayscale image.

NOTE(review): in the original, all five functions shared the single mangled
name `__UpperCAmelCase`, so each definition shadowed the previous one and the
`__main__` block's calls to `parse_args` and `bilateral_filter` referenced
undefined names. The function names below are restored from their call sites.
"""

import math
import sys

import cva
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply the Gaussian function element-wise to *img* for the given variance."""
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the square window of side *kernel_size* centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create a spatial Gaussian kernel of the given size and variance."""
    dist = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            # Euclidean distance of each cell from the kernel centre.
            dist[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(dist, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Edge-preserving smoothing: weight each neighbourhood by both spatial
    distance and intensity difference. Border pixels (within kernel_size // 2
    of an edge) are left at zero, as in the original.
    """
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the centre pixel.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            img2[i, j] = np.sum(vals) / np.sum(weights)
    return img2


def parse_args(args: list) -> tuple:
    """Parse [filename, spatial_variance, intensity_variance, kernel_size]
    from argv, supplying defaults for any missing trailing argument.
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # Force the kernel size to be odd so it has a centre pixel.
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uinta(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
216
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): the original assigned both the logger and the archive map to
# the same mangled name `a__`, so the second assignment clobbered the logger.
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class snake_case(PretrainedConfig):
    """Configuration for a SwitchTransformers (sparse Mixture-of-Experts T5) model.

    NOTE(review): the original `__init__` declared every parameter with the
    same mangled name (a SyntaxError) and assigned every value to a single
    local instead of `self`, even though the body reads `self.num_layers`,
    `self.num_sparse_encoder_layers` and `self.feed_forward_proj`. Parameter
    and attribute names are restored from those in-body references; the
    class name is preserved from the source file.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(
                f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}"
            )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # `feed_forward_proj` is either "<act>" or "gated-<act>".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
477
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: a__ = None a__ = logging.get_logger(__name__) a__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""} a__ = { """vocab_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""", }, """tokenizer_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""", }, } a__ = { """google/rembert""": 2_56, } a__ = """▁""" class snake_case ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case_ : Union[str, Any] = VOCAB_FILES_NAMES snake_case_ : Dict = PRETRAINED_VOCAB_FILES_MAP snake_case_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ : str = RemBertTokenizer def __init__( self : List[str] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Tuple=False , lowerCAmelCase : Union[str, Any]="[CLS]" , lowerCAmelCase : List[Any]="[SEP]" , lowerCAmelCase : List[str]="<unk>" , lowerCAmelCase : int="[SEP]" , lowerCAmelCase : Optional[Any]="<pad>" , lowerCAmelCase : Union[str, Any]="[CLS]" , lowerCAmelCase : Optional[Any]="[MASK]" , **lowerCAmelCase : Optional[int] , ) -> Tuple: """simple docstring""" _snake_case : Optional[Any] = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else mask_token super().__init__( lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , 
unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , **lowerCAmelCase , ) _snake_case : List[str] = do_lower_case _snake_case : Optional[int] = remove_space _snake_case : List[Any] = keep_accents _snake_case : Optional[Any] = vocab_file _snake_case : Any = False if not self.vocab_file else True def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" _snake_case : Any = [self.sep_token_id] _snake_case : List[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""") return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase)) + [1] + ([0] * len(lowerCAmelCase)) + [1] return [1] + ([0] * len(lowerCAmelCase)) + [1] def UpperCamelCase_ ( self : int , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" _snake_case : List[str] = [self.sep_token_id] _snake_case : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def UpperCamelCase_ ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCAmelCase): logger.error("""Vocabulary path ({}) should be a directory""".format(lowerCAmelCase)) return 
_snake_case : Optional[Any] = os.path.join( lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase): copyfile(self.vocab_file , lowerCAmelCase) return (out_vocab_file,)
477
1
import math def UpperCamelCase_( __magic_name__ : int ): """simple docstring""" return math.sqrt(__magic_name__ ) * math.sqrt(__magic_name__ ) == num def UpperCamelCase_( __magic_name__ : int ): """simple docstring""" _lowerCAmelCase :List[Any] = 0 _lowerCAmelCase :Any = n while left <= right: _lowerCAmelCase :Optional[int] = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: _lowerCAmelCase :List[Any] = mid - 1 else: _lowerCAmelCase :List[Any] = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
704
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
382
0
"""Sum all node values of a binary tree via an iterable wrapper class."""

from __future__ import annotations

from collections.abc import Iterator


class Node:
    """A binary-tree node holding a value and optional left/right children.

    NOTE(review): both classes in the original shared one mangled name (so the
    second shadowed the first) and the constructors assigned to throwaway
    locals instead of `self`; the attribute names are restored from the reads
    in `depth_first_search` (`node.value`, `node.left`, `node.right`) and the
    annotation `tree: Node`.
    """

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Iterable over a binary tree that yields the sum of all node values."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Recursively sum the subtree rooted at *node* (0 for an empty subtree)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        # Yields exactly one item: the total sum of the tree.
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __a: Tuple = logging.get_logger(__name__) __a: Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __a: Any = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } __a: Any = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } __a: str = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( 
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } __a: Dict = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } __a: List[str] = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } __a: Dict = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } __a: Optional[int] = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } __a: Tuple = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } __a: Optional[int] = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __a: List[Any] = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', 
'''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) __a: Optional[int] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) __a: Optional[Any] = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: ``` [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> ``` Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. 
Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. 
If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Returns: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __call__( self : int , lowerCamelCase : int , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Union[bool, str] = False , lowerCamelCase : Union[bool, str] = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : Optional[bool] = None , **lowerCamelCase : Optional[int] , ) -> BatchEncoding: """simple docstring""" if titles is None and texts is None: return super().__call__( lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , return_tensors=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , ) elif titles is None or texts is None: _UpperCAmelCase = titles if texts is None else texts return super().__call__( lowerCamelCase , lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , return_tensors=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , ) _UpperCAmelCase = titles if not isinstance(lowerCamelCase , lowerCamelCase ) else [titles] _UpperCAmelCase = texts if not isinstance(lowerCamelCase , lowerCamelCase ) else [texts] _UpperCAmelCase = len(lowerCamelCase ) _UpperCAmelCase = questions if not isinstance(lowerCamelCase , lowerCamelCase ) else [questions] * n_passages if len(lowerCamelCase ) != len(lowerCamelCase ): raise ValueError( f"""There should be as many titles than texts but got {len(lowerCamelCase )} 
titles and {len(lowerCamelCase )} texts.""" ) _UpperCAmelCase = super().__call__(lowerCamelCase , lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase )["""input_ids"""] _UpperCAmelCase = super().__call__(lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase )["""input_ids"""] _UpperCAmelCase = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCamelCase , lowerCamelCase ) ] } if return_attention_mask is not False: _UpperCAmelCase = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) _UpperCAmelCase = attention_mask return self.pad(lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , return_tensors=lowerCamelCase ) def lowerCamelCase ( self : Tuple , lowerCamelCase : BatchEncoding , lowerCamelCase : DPRReaderOutput , lowerCamelCase : int = 16 , lowerCamelCase : int = 64 , lowerCamelCase : int = 4 , ) -> List[DPRSpanPrediction]: """simple docstring""" _UpperCAmelCase = reader_input["""input_ids"""] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reader_output[:3] _UpperCAmelCase = len(lowerCamelCase ) _UpperCAmelCase = sorted(range(lowerCamelCase ) , reverse=lowerCamelCase , key=relevance_logits.__getitem__ ) _UpperCAmelCase = [] for doc_id in sorted_docs: _UpperCAmelCase = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence _UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: _UpperCAmelCase = sequence_ids.index(self.pad_token_id ) else: _UpperCAmelCase = len(lowerCamelCase ) _UpperCAmelCase = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , 
end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase , top_spans=lowerCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase , start_index=lowerCamelCase , end_index=lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowerCamelCase ( self : List[Any] , lowerCamelCase : List[int] , lowerCamelCase : List[int] , lowerCamelCase : int , lowerCamelCase : int , ) -> List[DPRSpanPrediction]: """simple docstring""" _UpperCAmelCase = [] for start_index, start_score in enumerate(lowerCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) _UpperCAmelCase = sorted(lowerCamelCase , key=lambda lowerCamelCase : x[1] , reverse=lowerCamelCase ) _UpperCAmelCase = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" ) _UpperCAmelCase = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase ): '''simple docstring''' 
_lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = READER_PRETRAINED_INIT_CONFIGURATION _lowerCamelCase = ['''input_ids''', '''attention_mask''']
108
0
"""Convert an S3PRL wav2vec2 downstream-task checkpoint to the HuggingFace Transformers format."""

import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load an S3PRL sequence-classification head into a Wav2Vec2ForSequenceClassification model."""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    # Copy projector and classifier weights from the downstream checkpoint into the HF model.
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load an S3PRL diarization head into a Wav2Vec2ForAudioFrameClassification model."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load an S3PRL x-vector (speaker verification) head into a Wav2Vec2ForXVector model."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    # One TDNN layer per entry in the config's kernel list.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak an S3PRL checkpoint's downstream weights into a Transformers model and save
    both the model and its feature extractor to `model_dump_path`.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    # Dispatch on the architecture declared in the config.
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
418
'''simple docstring''' from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " ,_lowerCAmelCase ,) class lowerCAmelCase__ ( _lowerCAmelCase ): A = RobertaConfig A = "roberta" def __init__( self : str , UpperCamelCase_ : Optional[Any] ) -> str: """simple docstring""" super().__init__(UpperCamelCase_ ) lowerCamelCase_ : List[str] = RobertaEmbeddings(UpperCamelCase_ ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. 
" ,_lowerCAmelCase ,) class lowerCAmelCase__ ( _lowerCAmelCase ): A = RobertaConfig A = "roberta" def __init__( self : Optional[int] , UpperCamelCase_ : List[str] ) -> Tuple: """simple docstring""" super().__init__(UpperCamelCase_ ) lowerCamelCase_ : Union[str, Any] = config.num_labels lowerCamelCase_ : Dict = config.num_hidden_layers lowerCamelCase_ : Union[str, Any] = DeeRobertaModel(UpperCamelCase_ ) lowerCamelCase_ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob ) lowerCamelCase_ : str = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : int=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[str]=-1 , UpperCamelCase_ : Optional[Any]=False , ) -> Tuple: """simple docstring""" lowerCamelCase_ : Union[str, Any] = self.num_layers try: lowerCamelCase_ : Union[str, Any] = self.roberta( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , ) lowerCamelCase_ : Union[str, Any] = outputs[1] lowerCamelCase_ : Optional[int] = self.dropout(UpperCamelCase_ ) lowerCamelCase_ : Dict = self.classifier(UpperCamelCase_ ) lowerCamelCase_ : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: lowerCamelCase_ : List[str] = e.message lowerCamelCase_ : List[str] = e.exit_layer lowerCamelCase_ : Optional[Any] = outputs[0] if not self.training: lowerCamelCase_ : str = entropy(UpperCamelCase_ ) lowerCamelCase_ : Tuple = [] lowerCamelCase_ : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression lowerCamelCase_ : List[Any] = MSELoss() lowerCamelCase_ : Dict = 
loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: lowerCamelCase_ : Optional[int] = CrossEntropyLoss() lowerCamelCase_ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits lowerCamelCase_ : Optional[Any] = [] for highway_exit in outputs[-1]: lowerCamelCase_ : List[str] = highway_exit[0] if not self.training: highway_logits_all.append(UpperCamelCase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression lowerCamelCase_ : Union[str, Any] = MSELoss() lowerCamelCase_ : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: lowerCamelCase_ : Union[str, Any] = CrossEntropyLoss() lowerCamelCase_ : Any = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(UpperCamelCase_ ) if train_highway: lowerCamelCase_ : Any = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: lowerCamelCase_ : Optional[int] = (loss,) + outputs if not self.training: lowerCamelCase_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: lowerCamelCase_ : int = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
418
1
"""simple docstring""" import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class __lowercase ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = ProphetNetTokenizer __lowerCAmelCase = False def _lowerCamelCase ( self ): super().setUp() __a : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _lowerCamelCase ( self , _UpperCAmelCase ): __a : List[str] = '''UNwant\u00E9d,running''' __a : List[Any] = '''unwanted, running''' return input_text, output_text def _lowerCamelCase ( self ): __a : Optional[Any] = self.tokenizer_class(self.vocab_file ) __a : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] ) def _lowerCamelCase ( self ): __a : Any = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def _lowerCamelCase ( self ): __a : Optional[int] = BasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _lowerCamelCase ( self ): __a : List[Any] = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def _lowerCamelCase ( self ): __a : Optional[Any] = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _lowerCamelCase ( self ): __a : int = BasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _lowerCamelCase ( self ): __a : List[Any] = BasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _lowerCamelCase ( self ): __a : Union[str, Any] = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _lowerCamelCase ( self ): __a : List[Any] = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? 
''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _lowerCamelCase ( self ): __a : Any = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def _lowerCamelCase ( self ): __a : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __a : str = {} for i, token in enumerate(_UpperCAmelCase ): __a : Tuple = i __a : Dict = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) @require_torch def _lowerCamelCase ( self ): __a : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) __a : Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __a : List[Any] = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __a : List[str] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='''pt''' ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) __a : Dict = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def _lowerCamelCase ( self ): self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' 
) ) def _lowerCamelCase ( self ): self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def _lowerCamelCase ( self ): self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) @slow def _lowerCamelCase ( self ): __a : Tuple = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) __a : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCAmelCase ) __a : Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCAmelCase ) __a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) __a : List[str] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
52
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _UpperCamelCase = logging.get_logger(__name__) class __UpperCAmelCase (__A ): '''simple docstring''' _UpperCamelCase : Optional[int] = ['input_features', 'is_longer'] def __init__( self , snake_case_=64 , snake_case_=48_000 , snake_case_=480 , snake_case_=10 , snake_case_=1_024 , snake_case_=0.0 , snake_case_=False , snake_case_ = 0 , snake_case_ = 14_000 , snake_case_ = None , snake_case_ = "fusion" , snake_case_ = "repeatpad" , **snake_case_ , ): '''simple docstring''' super().__init__( feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , ) A__ : List[str] = top_db A__ : Optional[int] = truncation A__ : str = padding A__ : str = fft_window_size A__ : Any = (fft_window_size >> 1) + 1 A__ : Optional[Any] = hop_length A__ : Any = max_length_s A__ : int = max_length_s * sampling_rate A__ : Any = sampling_rate A__ : List[str] = frequency_min A__ : int = frequency_max A__ : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm=snake_case_ , mel_scale="""htk""" , ) A__ : List[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm="""slaney""" , mel_scale="""slaney""" , ) def lowerCamelCase ( self ): '''simple docstring''' A__ : List[Any] = copy.deepcopy(self.__dict__ ) A__ : int = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if 
"mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def lowerCamelCase ( self , snake_case_ , snake_case_ = None ): '''simple docstring''' A__ : Any = spectrogram( snake_case_ , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case_ , log_mel="""dB""" , ) return log_mel_spectrogram.T def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : Union[str, Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk A__ : Optional[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk A__ : str = [0] # randomly choose index for each part A__ : int = np.random.choice(ranges[0] ) A__ : Dict = np.random.choice(ranges[1] ) A__ : Dict = np.random.choice(ranges[2] ) A__ : Any = mel[idx_front : idx_front + chunk_frames, :] A__ : Optional[int] = mel[idx_middle : idx_middle + chunk_frames, :] A__ : Optional[int] = mel[idx_back : idx_back + chunk_frames, :] A__ : Dict = torch.tensor(mel[None, None, :] ) A__ : Dict = torch.nn.functional.interpolate( snake_case_ , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=snake_case_ ) A__ : List[Any] = mel_shrink[0][0].numpy() A__ : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": A__ : int = True # random crop to max_length (for compatibility) -> this should be handled by self.pad A__ : Tuple = len(snake_case_ ) - max_length A__ : Any = np.random.randint(0 , overflow + 1 ) A__ : int = waveform[idx : idx + max_length] A__ : List[str] = self._np_extract_fbank_features(snake_case_ , 
self.mel_filters_slaney )[None, :] elif truncation == "fusion": A__ : Optional[int] = self._np_extract_fbank_features(snake_case_ , self.mel_filters ) A__ : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed A__ : int = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. A__ : Union[str, Any] = np.stack([mel, mel, mel, mel] , axis=0 ) A__ : Dict = False else: A__ : List[Any] = self._random_mel_fusion(snake_case_ , snake_case_ , snake_case_ ) A__ : Optional[Any] = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: A__ : Tuple = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": A__ : Union[str, Any] = int(max_length / len(snake_case_ ) ) A__ : List[Any] = np.stack(np.tile(snake_case_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": A__ : List[Any] = int(max_length / len(snake_case_ ) ) A__ : Tuple = np.stack(np.tile(snake_case_ , snake_case_ ) ) A__ : Tuple = np.pad(snake_case_ , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 ) if truncation == "fusion": A__ : str = self._np_extract_fbank_features(snake_case_ , self.mel_filters ) A__ : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: A__ : List[Any] = self._np_extract_fbank_features(snake_case_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , **snake_case_ , ): '''simple docstring''' A__ : Optional[Any] = truncation if truncation is not None else self.truncation A__ : Union[str, Any] = padding if padding else 
self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) A__ : List[Any] = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) A__ : int = is_batched_numpy or ( isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A__ : Any = [np.asarray(snake_case_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case_ , np.ndarray ): A__ : Any = np.asarray(snake_case_ , dtype=np.floataa ) elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A__ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A__ : Union[str, Any] = [np.asarray(snake_case_ )] # convert to mel spectrogram, truncate and pad if needed. 
A__ : Optional[Any] = [ self._get_input_mel(snake_case_ , max_length if max_length else self.nb_max_samples , snake_case_ , snake_case_ ) for waveform in raw_speech ] A__ : List[str] = [] A__ : Optional[Any] = [] for mel, longer in padded_inputs: input_mel.append(snake_case_ ) is_longer.append(snake_case_ ) if truncation == "fusion" and sum(snake_case_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer A__ : Optional[Any] = np.random.randint(0 , len(snake_case_ ) ) A__ : str = True if isinstance(input_mel[0] , snake_case_ ): A__ : Optional[Any] = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool A__ : Union[str, Any] = [[longer] for longer in is_longer] A__ : str = {"""input_features""": input_mel, """is_longer""": is_longer} A__ : Optional[int] = BatchFeature(snake_case_ ) if return_tensors is not None: A__ : Dict = input_features.convert_to_tensors(snake_case_ ) return input_features
363
0
def binomial_coefficient(n: int, r: int) -> int:
    """Return C(n, r) via Pascal's rule, using a single O(r) row.

    Args:
        n: total number of items (n >= 0).
        r: number of items chosen (0 <= r <= n).

    Returns:
        The binomial coefficient n choose r.
    """
    c = [0 for _ in range(r + 1)]
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # Update the row in place from right to left so each c[j-1]
        # still holds the previous row's value when it is read.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
703
"""Tests for the Flax ViT model (FlaxViTModel / FlaxViTForImageClassification)."""

import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
338
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: modeling objects are only registered when torch is available.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
239
import numpy as np def _SCREAMING_SNAKE_CASE ( a , a , a , a , a ) -> Optional[Any]: __A : List[Any] = int(np.ceil((x_end - xa) / h ) ) __A : Tuple = np.zeros((n + 1,) ) __A : Tuple = ya __A : Optional[Any] = xa for k in range(a ): __A : List[Any] = f(a , y[k] ) __A : int = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) __A : Optional[int] = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) __A : Dict = f(x + h , y[k] + h * ka ) __A : Union[str, Any] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
239
1
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency-dict: node -> list of successors)
    contains a cycle, using DFS with a recursion stack."""
    visited = set()
    # Vertices currently on the DFS recursion stack; hitting one again means a back edge.
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex, visited: set, rec_stk: set) -> bool:
    """DFS helper: return True if a cycle is reachable from ``vertex``."""
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # Back edge to an ancestor still on the stack -> cycle.
            return True
    # The node must leave the recursion stack before this call returns.
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
707
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetVaConfig,
    MobileNetVaForImageClassification,
    MobileNetVaImageProcessor,
    load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging


logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config from a model name of the form
    ``mobilenet_v1_<depth>_<size>`` (e.g. ``mobilenet_v1_1.0_224``)."""
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Shift every ImageNet id up by one to make room for the background class.
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config


def prepare_img():
    """Download the standard COCO cats test image used for sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy a TensorFlow MobileNetV1 checkpoint into a 🤗 model directory,
    verifying the logits on a reference image for the known model names."""
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        # Other depth/size combinations have no recorded reference logits.
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    __lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    __lowerCAmelCase.add_argument(
        '--model_name',
        default='mobilenet_v1_1.0_224',
        type=str,
        help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
    )
    __lowerCAmelCase.add_argument(
        '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
    )
    __lowerCAmelCase.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    __lowerCAmelCase.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = __lowerCAmelCase.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
76
0
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase__ ="\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n" UpperCAmelCase__ ="\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n" UpperCAmelCase__ ="\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. 
Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n" UpperCAmelCase__ ="\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n" UpperCAmelCase__ ="The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE." @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Union[str, Any] , A_ : List[Any] , A_ : Tuple=[1, 1_0, 1_0_0] , A_ : str=4 , A_ : Tuple=3.0 ): '''simple docstring''' if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=A_ ) as executor: __lowercase = [] __lowercase = Counter() __lowercase = 0 __lowercase = defaultdict(A_ ) for task_id, (candidates, test_case) in enumerate(zip(A_ , A_ ) ): for candidate in candidates: __lowercase = candidate + """\n""" + test_case __lowercase = (test_program, timeout, task_id, completion_id[task_id]) __lowercase = executor.submit(A_ , *A_ ) futures.append(A_ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(A_ ): __lowercase = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) __lowercase , __lowercase = [], [] for result in results.values(): result.sort() __lowercase = [r[1]["""passed"""] for r in result] total.append(len(A_ ) ) correct.append(sum(A_ ) ) __lowercase = np.array(A_ ) __lowercase = np.array(A_ ) __lowercase = k __lowercase = {F'''pass@{k}''': estimate_pass_at_k(A_ , A_ , A_ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowerCAmelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ): """simple docstring""" def estimator(UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ 
: int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ): __lowercase = itertools.repeat(UpperCamelCase__ , len(UpperCamelCase__ ) ) else: assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ) __lowercase = iter(UpperCamelCase__ ) return np.array([estimator(int(UpperCamelCase__ ) , int(UpperCamelCase__ ) , UpperCamelCase__ ) for n, c in zip(UpperCamelCase__ , UpperCamelCase__ )] )
616
"""simple docstring""" def lowerCAmelCase_ ( UpperCamelCase__ : int ): """simple docstring""" assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and number_of_steps > 0 ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}''' if number_of_steps == 1: return 1 __lowercase , __lowercase = 1, 1 for _ in range(number_of_steps - 1 ): __lowercase , __lowercase = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
616
1
"""simple docstring""" from __future__ import annotations import math def lowercase (snake_case__ : Union[str, Any] ) -> list[int]: '''simple docstring''' if num <= 0: lowerCAmelCase = f'''{num}: Invalid input, please enter a positive integer.''' raise ValueError(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase = [True] * (num + 1) lowerCAmelCase = [] lowerCAmelCase = 2 lowerCAmelCase = int(math.sqrt(SCREAMING_SNAKE_CASE_ ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(SCREAMING_SNAKE_CASE_ ) # Set multiples of start be False for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE_ ): if sieve[i] is True: lowerCAmelCase = False start += 1 for j in range(end + 1 , num + 1 ): if sieve[j] is True: prime.append(SCREAMING_SNAKE_CASE_ ) return prime if __name__ == "__main__": print(prime_sieve(int(input('Enter a positive integer: ').strip())))
703
"""simple docstring""" from queue import PriorityQueue from typing import Any import numpy as np def lowercase (snake_case__ : dict , snake_case__ : str , snake_case__ : set , snake_case__ : set , snake_case__ : dict , snake_case__ : dict , snake_case__ : PriorityQueue , snake_case__ : dict , snake_case__ : float | int , ) -> float | int: '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue lowerCAmelCase = cst_fwd.get(snake_case__ , np.inf ) lowerCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) lowerCAmelCase = new_cost_f lowerCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: lowerCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def lowercase (snake_case__ : str , snake_case__ : str , snake_case__ : dict , snake_case__ : dict ) -> int: '''simple docstring''' lowerCAmelCase = -1 lowerCAmelCase = set() lowerCAmelCase = set() lowerCAmelCase = {source: 0} lowerCAmelCase = {destination: 0} lowerCAmelCase = {source: None} lowerCAmelCase = {destination: None} lowerCAmelCase = PriorityQueue() lowerCAmelCase = PriorityQueue() lowerCAmelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): lowerCAmelCase , lowerCAmelCase = queue_forward.get() visited_forward.add(snake_case__ ) lowerCAmelCase , lowerCAmelCase = queue_backward.get() visited_backward.add(snake_case__ ) lowerCAmelCase = pass_and_relaxation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) lowerCAmelCase = pass_and_relaxation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: lowerCAmelCase = 
shortest_distance return shortest_path_distance a = { 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } a = { 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
529
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase__ = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
581
"""simple docstring""" def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str: if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) UpperCAmelCase__ : Dict = str(bin(lowerCAmelCase ) )[2:] # remove the leading "0b" UpperCAmelCase__ : Any = str(bin(lowerCAmelCase ) )[2:] # remove the leading "0b" UpperCAmelCase__ : Optional[Any] = max(len(lowerCAmelCase ) , len(lowerCAmelCase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(lowerCAmelCase ) , b_binary.zfill(lowerCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
182
0
A__ = { 0: '''0''', 1: '''1''', 2: '''2''', 3: '''3''', 4: '''4''', 5: '''5''', 6: '''6''', 7: '''7''', 8: '''8''', 9: '''9''', 10: '''a''', 11: '''b''', 12: '''c''', 13: '''d''', 14: '''e''', 15: '''f''', } def _lowerCAmelCase ( __lowerCAmelCase ) -> str: """simple docstring""" assert type(__lowerCAmelCase ) in (int, float) and decimal == int(__lowerCAmelCase ) snake_case__ : Tuple = int(__lowerCAmelCase ) snake_case__ : List[str] = '''''' snake_case__ : Dict = False if decimal < 0: snake_case__ : List[str] = True decimal *= -1 while decimal > 0: snake_case__ , snake_case__ : str = divmod(__lowerCAmelCase , 16 ) snake_case__ : int = values[remainder] + hexadecimal snake_case__ : Tuple = '''0x''' + hexadecimal if negative: snake_case__ : Tuple = '''-''' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
219
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place by repeated random pair swaps and return it.

    Note: this is the naive variant that swaps two uniformly random positions
    len(data) times (not the strictly decreasing-range Fisher-Yates walk).
    An empty or single-element list is returned unchanged.
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
219
1
"""Convert DiT (BEiT-based) checkpoints from the original repository to 🤗 format."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
A_ = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the list of (old_key, new_key) pairs mapping original DiT/BEiT
    checkpoint names onto 🤗 BEiT parameter names."""
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", """beit.embeddings.cls_token"""),
            (f"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
            (f"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
            (f"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("""mask_token""", """beit.embeddings.mask_token"""),
                ("""norm.weight""", """layernorm.weight"""),
                ("""norm.bias""", """layernorm.bias"""),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
                ("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
                ("""head.weight""", """classifier.weight"""),
                ("""head.bias""", """classifier.bias"""),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each fused qkv projection in ``state_dict`` into separate
    query/key/value tensors and move gamma scales to lambda names (in place)."""
    for i in range(config.num_hidden_layers):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        # The fused weight is stacked [query; key; value] along dim 0.
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_b = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_b


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used for output verification."""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Download a DiT checkpoint, remap its weights into a 🤗 BEiT model,
    sanity-check the logits shape, and save (optionally push) the result."""
    # rvlcdip checkpoints are classifiers; all others keep the LM (masked-image) head.
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""model"""]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="""pt""")
    pixel_values = encoding["""pixel_values"""]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="""nielsr""",
            commit_message="""Add image processor""",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="""nielsr""",
            commit_message="""Add model""",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    A_ = argparse.ArgumentParser()
    A_.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    A_.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    A_.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = A_.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
38
"""Slack reporter for the Hugging Face doc-test CI run.

Collects per-category doc-test results from GitHub Actions artifacts and posts
a summary (plus per-job threaded replies) to a Slack channel via ``slack_sdk``.

NOTE(review): this copy is machine-mangled — every assignment target was
rewritten to a placeholder (``A__`` / ``A_``), so names referenced later
(``failed``, ``success``, ``time_spent``, ``client`` …) are never bound, two
helper functions share the name ``UpperCamelCase``, and several signatures
repeat a parameter name (a SyntaxError).  The module cannot run as-is; the
comments below describe the evident intent only.
"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient

# Slack client authenticated with the CI bot token (target of this assignment
# was presumably named ``client`` — the methods below use that name).
A_ : str = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])


def UpperCamelCase (lowercase_: Union[str, Any] ) -> Dict:
    # Parse a pytest stats line into (failed, success, time_spent).
    # NOTE(review): targets mangled — ``test_results``/``expressions`` etc.
    # were presumably the original names of these locals.
    A__ : Union[str, Any] = test_results.split(""" """ )
    A__ : Union[str, Any] = 0
    A__ : Union[str, Any] = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    A__ : List[str] = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(lowercase_ ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent


def UpperCamelCase (lowercase_: Any ) -> Optional[int]:
    # Map each failing doc test to the first line of its short failure report.
    # NOTE(review): shadows the previous helper — originally a distinct name
    # (presumably ``extract_first_line_failure``; confirm against upstream).
    A__ : Dict = {}
    A__ : Union[str, Any] = None
    A__ : List[str] = False
    for line in failures_short_lines.split("""\n""" ):
        if re.search(r"""_ \[doctest\]""" , lowercase_ ):
            A__ : Tuple = True
            A__ : Dict = line.split(""" """ )[2]
        elif in_error and not line.split(""" """ )[0].isdigit():
            A__ : Union[str, Any] = line
            A__ : List[str] = False
    return failures


class _a :
    """Builder for the Slack message describing one doc-test run.

    Wraps the aggregated ``doc_test_results`` dict and exposes the individual
    Slack "blocks" (header, success/failure summaries, per-category report)
    as properties, plus ``post``/``post_reply`` to send them.
    """

    def __init__( self , A__ , A__ ):
        # NOTE(review): duplicate parameter names — SyntaxError as written.
        # Attribute targets (``self.title``, ``self._time_spent`` …) were
        # destroyed by the mangling.
        A__ : Optional[Any] = title
        A__ : Tuple = doc_test_results["""time_spent"""].split(""",""" )[0]
        A__ : str = doc_test_results["""success"""]
        A__ : Optional[int] = doc_test_results["""failures"""]
        A__ : int = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        A__ : Optional[int] = doc_test_results

    @property
    def __A ( self ):
        # Total wall time formatted as "XhYmZs" (originally the ``time``
        # property, by the evident intent of the f-string below).
        A__ : Tuple = [self._time_spent]
        A__ : Tuple = 0
        for time in time_spent:
            A__ : Dict = time.split(""":""" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(A__ ) == 1:
                A__ : Dict = [0, 0, time_parts[0]]
            A__ , A__ , A__ : Optional[Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        A__ , A__ , A__ : List[str] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F"""{int(A__ )}h{int(A__ )}m{int(A__ )}s"""

    @property
    def __A ( self ):
        # Header block with the report title.
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def __A ( self ):
        # "All tests passed" section, linking back to the Action run.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
            },
        }

    @property
    def __A ( self ):
        # Failure-count section, linking back to the Action run.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
                    F""" {self.time}."""
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
            },
        }

    @property
    def __A ( self ):
        # Markdown section listing failed examples grouped by category.
        A__ : Tuple = 40
        A__ : Dict = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(A__ , A__ )}
        A__ : str = """"""
        for category, failures in category_failures.items():
            if len(A__ ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(A__ )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F"""The following examples had failures:\n\n\n{report}\n""",
            },
        }

    @property
    def __A ( self ):
        # Full message payload: header, then failure or success sections.
        A__ : Tuple = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(A__ )

    @staticmethod
    def __A ( ):
        # Post a generic "the run itself broke" message to Slack.
        A__ : List[str] = [
            {
                """type""": """section""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """There was an issue running the tests.""",
                },
                """accessory""": {
                    """type""": """button""",
                    """text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
                    """url""": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
                },
            }
        ]
        print("""Sending the following payload""" )
        print(json.dumps({"""blocks""": json.loads(A__ )} ) )
        # NOTE(review): ``client`` is unbound here — the module-level WebClient
        # was assigned to the mangled name ``A_``.
        client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] ,
            text="""There was an issue running the tests.""" ,
            blocks=A__ ,
        )

    def __A ( self ):
        # Post the main report and (presumably) remember its thread timestamp
        # so replies can be attached — TODO confirm against upstream.
        print("""Sending the following payload""" )
        print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
        A__ : Any = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
        A__ : Any = client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] ,
            blocks=self.payload ,
            text=A__ ,
        )

    def __A ( self , A__ , A__ , A__ , A__ ):
        # Build the blocks for one job's threaded reply (header + job button +
        # truncated failure texts).  NOTE(review): duplicate parameter names —
        # SyntaxError as written; ``failures``/``job_name``/``job_link``/``text``
        # are used in the body but never bound.
        A__ : Tuple = """"""
        for key, value in failures.items():
            A__ : Any = value[:200] + """ [Truncated]""" if len(A__ ) > 250 else value
            failures_text += F"""*{key}*\n_{value}_\n\n"""
        A__ : Optional[Any] = job_name
        A__ : Union[str, Any] = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
        if job_link is not None:
            A__ : Dict = {
                """type""": """button""",
                """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
                """url""": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def __A ( self ):
        # Post one threaded reply per job that has failures, rate-limited to
        # one message per second.
        if self.thread_ts is None:
            raise ValueError("""Can only post reply if a post has been made.""" )
        A__ : List[Any] = self.doc_test_results.pop("""job_link""" )
        self.doc_test_results.pop("""failures""" )
        self.doc_test_results.pop("""success""" )
        self.doc_test_results.pop("""time_spent""" )
        A__ : List[Any] = sorted(self.doc_test_results.items() , key=lambda A__ : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["""failures"""] ):
                A__ : Optional[int] = F"""*Num failures* :{len(job_result['failed'] )} \n"""
                A__ : Any = job_result["""failures"""]
                A__ : List[str] = self.get_reply_blocks(A__ , A__ , A__ , text=A__ )
                print("""Sending the following reply""" )
                print(json.dumps({"""blocks""": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] ,
                    text=F"""Results for {job}""" ,
                    blocks=A__ ,
                    thread_ts=self.thread_ts["""ts"""] ,
                )
                # Avoid hammering the Slack API.
                time.sleep(1 )


def UpperCamelCase () -> Dict:
    # Fetch {job name: html_url} for every job of the current Action run,
    # paging through the GitHub API 100 jobs at a time.
    A__ : int = os.environ["""GITHUB_RUN_ID"""]
    A__ : Union[str, Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
    A__ : Optional[int] = requests.get(lowercase_ ).json()
    A__ : List[str] = {}
    try:
        jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        A__ : Dict = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(lowercase_ ):
            A__ : str = requests.get(url + f"""&page={i + 2}""" ).json()
            jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        return jobs
    except Exception as e:
        # Best-effort: a partial/failed fetch degrades to "no job links".
        print("""Unknown error, could not fetch links.""" , lowercase_ )
    return {}


def UpperCamelCase (lowercase_: str ) -> Any:
    # Read every file of a downloaded artifact directory into a dict
    # (UTF-8 text only; a binary file raises ValueError).
    A__ : List[Any] = {}
    if os.path.exists(lowercase_ ):
        A__ : List[str] = os.listdir(lowercase_ )
        for file in files:
            try:
                with open(os.path.join(lowercase_ , lowercase_ ) , encoding="""utf-8""" ) as f:
                    A__ : Dict = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"""Could not open {os.path.join(lowercase_ , lowercase_ )}.""" ) from e
    return _artifact


def UpperCamelCase () -> Union[str, Any]:
    # Scan the working directory for downloaded artifact folders and group
    # their paths by artifact name.
    class _a :
        """Named artifact: a label plus the list of paths found for it."""

        def __init__( self , A__ ):
            A__ : str = name
            A__ : Optional[int] = []

        def __str__( self ):
            return self.name

        def __A ( self , A__ ):
            self.paths.append({"""name""": self.name, """path""": path} )

    A__ : Dict[str, Artifact] = {}
    A__ : int = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        A__ : Dict = directory
        if artifact_name not in _available_artifacts:
            A__ : int = Artifact(lowercase_ )
        _available_artifacts[artifact_name].add_path(lowercase_ )
    return _available_artifacts


if __name__ == "__main__":
    # Driver: gather job links and artifacts, bucket doc-test failures by
    # file pattern, then post the report and per-job replies to Slack.
    A_ : str = get_job_links()
    A_ : Dict = retrieve_available_artifacts()
    A_ : int = collections.OrderedDict(
        [
            ('*.py', 'API Examples'),
            ('*.md', 'MD Examples'),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    A_ : int = {
        v: {
            'failed': [],
            'failures': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    A_ : Optional[Any] = github_actions_job_links.get('run_doctests')
    A_ : str = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    A_ : List[Any] = retrieve_artifact(artifact_path['name'])
    if "stats" in artifact:
        A_ , A_ , A_ : Any = handle_test_results(artifact['stats'])
        A_ : Union[str, Any] = failed
        A_ : int = success
        A_ : Optional[Any] = time_spent[1:-1] + ', '
        A_ : Optional[Any] = extract_first_line_failure(artifact['failures_short'])
        for line in artifact["summary_short"].split('\n'):
            if re.search('FAILED', line):
                A_ : Dict = line.replace('FAILED ', '')
                A_ : Dict = line.split()[0].replace('\n', '')
                if "::" in line:
                    A_ , A_ : Dict = line.split('::')
                else:
                    A_ , A_ : Dict = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        A_ : List[str] = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        A_ : Optional[int] = all_failures[test] if test in all_failures else 'N/A'
                        A_ : List[str] = failure
                        break
    A_ : Optional[Any] = Message('🤗 Results of the doc tests.', doc_test_results)
    message.post()
    message.post_reply()
456
0
"""Fill-mask pipeline: predict the token(s) behind ``[MASK]`` positions.

NOTE(review): this copy is machine-mangled — every assignment target is the
placeholder ``SCREAMING_SNAKE_CASE_``, every method is named ``_A``, several
argument values were replaced by the undefined name ``_UpperCAmelCase``, and
some signatures repeat a parameter name (a SyntaxError).  The module cannot
run as-is; comments describe the evident intent of each method only.
"""
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


# NOTE(review): presumably the module ``logger`` — name mangled.
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)


@add_end_docstrings(
    _UpperCamelCase ,
    R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " ,
)
class __magic_name__ ( _UpperCamelCase):
    """Masked-language-model pipeline (pre/forward/post stages).

    Finds mask positions in the tokenized input, runs the model, and returns
    the ``top_k`` candidate tokens (optionally restricted to ``targets``)
    with their scores and the completed sequences.
    """

    def _A ( self: str , _lowerCamelCase: Tuple ):
        # Locate mask-token positions in ``input_ids`` for the active
        # framework (tf or pt).
        if self.framework == "tf":
            SCREAMING_SNAKE_CASE_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            SCREAMING_SNAKE_CASE_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index

    def _A ( self: Optional[Any] , _lowerCamelCase: List[Any] ):
        # Raise a PipelineException when the input contains no mask token.
        SCREAMING_SNAKE_CASE_ = self.get_masked_index(_UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' ,
                self.model.base_model_prefix ,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input" ,
            )

    def _A ( self: Any , _lowerCamelCase: Optional[Any] ):
        # Validate every sample of a (possibly batched) model input.
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(_UpperCAmelCase )

    def _A ( self: List[Any] , _lowerCamelCase: List[str] , _lowerCamelCase: Union[str, Any]=None , **_lowerCamelCase: List[str] ):
        # preprocess: tokenize and validate mask presence.
        # NOTE(review): duplicate parameter names — SyntaxError as written.
        if return_tensors is None:
            SCREAMING_SNAKE_CASE_ = self.framework
        SCREAMING_SNAKE_CASE_ = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        self.ensure_exactly_one_mask_token(_UpperCAmelCase )
        return model_inputs

    def _A ( self: Dict , _lowerCamelCase: List[str] ):
        # forward: run the model and carry the input_ids through for
        # postprocessing.
        SCREAMING_SNAKE_CASE_ = self.model(**_UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ = model_inputs['''input_ids''']
        return model_outputs

    def _A ( self: int , _lowerCamelCase: str , _lowerCamelCase: str=5 , _lowerCamelCase: Tuple=None ):
        # postprocess: softmax over vocab at each mask position, take top_k
        # (optionally within ``target_ids``) and decode completed sequences.
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            SCREAMING_SNAKE_CASE_ = target_ids.shape[0]
        SCREAMING_SNAKE_CASE_ = model_outputs['''input_ids'''][0]
        SCREAMING_SNAKE_CASE_ = model_outputs['''logits''']
        if self.framework == "tf":
            SCREAMING_SNAKE_CASE_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            SCREAMING_SNAKE_CASE_ = outputs.numpy()
            SCREAMING_SNAKE_CASE_ = outputs[0, masked_index, :]
            SCREAMING_SNAKE_CASE_ = stable_softmax(_UpperCAmelCase , axis=-1 )
            if target_ids is not None:
                SCREAMING_SNAKE_CASE_ = tf.gather_nd(tf.squeeze(_UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
                SCREAMING_SNAKE_CASE_ = tf.expand_dims(_UpperCAmelCase , 0 )
            SCREAMING_SNAKE_CASE_ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase )
            SCREAMING_SNAKE_CASE_ = topk.values.numpy(), topk.indices.numpy()
        else:
            SCREAMING_SNAKE_CASE_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            SCREAMING_SNAKE_CASE_ = outputs[0, masked_index, :]
            SCREAMING_SNAKE_CASE_ = logits.softmax(dim=-1 )
            if target_ids is not None:
                SCREAMING_SNAKE_CASE_ = probs[..., target_ids]
            SCREAMING_SNAKE_CASE_ = probs.topk(_UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            SCREAMING_SNAKE_CASE_ = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                SCREAMING_SNAKE_CASE_ = input_ids.numpy().copy()
                if target_ids is not None:
                    SCREAMING_SNAKE_CASE_ = target_ids[p].tolist()
                SCREAMING_SNAKE_CASE_ = p
                # Filter padding out:
                SCREAMING_SNAKE_CASE_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
                SCREAMING_SNAKE_CASE_ = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
                row.append(_UpperCAmelCase )
            result.append(_UpperCAmelCase )
        if single_mask:
            return result[0]
        return result

    def _A ( self: Any , _lowerCamelCase: Any , _lowerCamelCase: List[str]=None ):
        # Resolve user-supplied target words to vocabulary ids, tokenizing
        # out-of-vocab targets (with a warning) and deduplicating.
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            SCREAMING_SNAKE_CASE_ = [targets]
        try:
            SCREAMING_SNAKE_CASE_ = self.tokenizer.get_vocab()
        except Exception:
            SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = []
        for target in targets:
            SCREAMING_SNAKE_CASE_ = vocab.get(_UpperCAmelCase , _UpperCAmelCase )
            if id_ is None:
                SCREAMING_SNAKE_CASE_ = self.tokenizer(
                    _UpperCAmelCase ,
                    add_special_tokens=_UpperCAmelCase ,
                    return_attention_mask=_UpperCAmelCase ,
                    return_token_type_ids=_UpperCAmelCase ,
                    max_length=1 ,
                    truncation=_UpperCAmelCase ,
                )['''input_ids''']
                if len(_UpperCAmelCase ) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        '''We cannot replace it with anything meaningful, ignoring it''' )
                    continue
                SCREAMING_SNAKE_CASE_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
            target_ids.append(id_ )
        SCREAMING_SNAKE_CASE_ = list(set(_UpperCAmelCase ) )
        if len(_UpperCAmelCase ) == 0:
            raise ValueError('''At least one target must be provided when passed.''' )
        SCREAMING_SNAKE_CASE_ = np.array(_UpperCAmelCase )
        return target_ids

    def _A ( self: int , _lowerCamelCase: Optional[Any]=None , _lowerCamelCase: List[str]=None ):
        # _sanitize_parameters: split kwargs into (preprocess, forward,
        # postprocess) parameter dicts; fail early without a mask token.
        SCREAMING_SNAKE_CASE_ = {}
        if targets is not None:
            SCREAMING_SNAKE_CASE_ = self.get_target_ids(_UpperCAmelCase , _UpperCAmelCase )
            SCREAMING_SNAKE_CASE_ = target_ids
        if top_k is not None:
            SCREAMING_SNAKE_CASE_ = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params

    def __call__( self: Optional[Any] , _lowerCamelCase: Union[str, Any] , *_lowerCamelCase: Optional[Any] , **_lowerCamelCase: List[Any] ):
        # Unwrap a single-element batch so one input yields one result.
        SCREAMING_SNAKE_CASE_ = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1:
            return outputs[0]
        return outputs
712
"""Seq2seq trainer that post-processes generated predictions before metrics.

Fixes applied to the corrupted original: the transformers import was
misspelled (``SeqaSeqTrainer`` does not exist — ``Seq2SeqTrainer`` does), the
base class was the undefined name ``__UpperCAmelCase``, both methods were
named ``_A``, and each signature repeated one mangled parameter name (a
SyntaxError).  Local names are reconstructed from their later uses in the
original bodies (``gen_kwargs``, ``eval_dataset``, ``output`` …); runtime
strings and logic are unchanged.
"""
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class __magic_name__(Seq2SeqTrainer):
    """Trainer whose ``evaluate``/``predict`` run a ``post_process_function``
    over raw generations (e.g. decoding answers from generated ids) before
    handing them to ``compute_metrics``."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """Store the raw eval examples and the prediction post-processor.

        Args:
            eval_examples: un-tokenized examples matching ``eval_dataset``.
            post_process_function: callable ``(examples, dataset, output[, stage])``
                returning predictions suitable for ``compute_metrics``.
        """
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        """Run generation-based evaluation and return the metrics dict."""
        gen_kwargs = gen_kwargs.copy()
        # Fall back to the generation settings configured on TrainingArguments.
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore, even if the loop raises.
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self,
        predict_dataset,
        predict_examples,
        ignore_keys: Optional[int] = None,
        metric_key_prefix: str = "test",
        **gen_kwargs,
    ):
        """Run generation-based prediction; return a ``PredictionOutput``.

        Returns the raw loop output unchanged when no post-processor or
        metric function is configured.
        """
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
89
0
"""simple docstring""" def __lowercase ( snake_case_ : list ) ->float: '''simple docstring''' __A : Tuple = 0 while len(snake_case_ ) > 1: __A : List[Any] = 0 # Consider two files with minimum cost to be merged for _ in range(2 ): __A : Dict = files.index(min(snake_case_ ) ) temp += files[min_index] files.pop(snake_case_ ) files.append(snake_case_ ) optimal_merge_cost += temp return optimal_merge_cost if __name__ == "__main__": import doctest doctest.testmod()
177
"""simple docstring""" def __lowercase ( snake_case_ : int ,snake_case_ : int ) ->int: '''simple docstring''' return int((input_a, input_a).count(0 ) == 0 ) def __lowercase ( ) ->None: '''simple docstring''' assert and_gate(0 ,0 ) == 0 assert and_gate(0 ,1 ) == 0 assert and_gate(1 ,0 ) == 0 assert and_gate(1 ,1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
177
1
"""Image processor: shortest-edge resize, center crop, rescale, normalize,
plus semantic-segmentation post-processing of model logits.

NOTE(review): this copy is machine-mangled — every assignment target is the
placeholder ``SCREAMING_SNAKE_CASE_`` (so the ``__init__`` never actually
sets ``self.do_resize`` etc.), every method is named ``a__``, the base class
is the undefined name ``lowercase_``, and every signature repeats the
parameter name ``_lowercase`` (a SyntaxError).  The module cannot run as-is;
comments describe the evident intent only.
"""
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


if is_torch_available():
    import torch


SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)


class snake_case ( lowercase_ ):
    """Image processor with the standard resize / crop / rescale / normalize
    pipeline (defaults: shortest edge 256 bilinear, 224x224 crop, 1/255
    rescale, ImageNet-standard mean/std)."""

    # Model-input key produced by ``preprocess``.
    _a = ["""pixel_values"""]

    def __init__( self, _lowercase = True, _lowercase = None, _lowercase = PILImageResampling.BILINEAR, _lowercase = True, _lowercase = None, _lowercase = True, _lowercase = 1 / 255, _lowercase = True, _lowercase = None, _lowercase = None, **_lowercase, ) -> None:
        # NOTE(review): duplicate parameter names — SyntaxError as written;
        # targets should presumably be ``self.size``, ``self.do_resize`` etc.
        super().__init__(**_lowercase )
        SCREAMING_SNAKE_CASE_ = size if size is not None else {'shortest_edge': 256}
        SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, default_to_square=_lowercase )
        SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, param_name='crop_size' )
        SCREAMING_SNAKE_CASE_ = do_resize
        SCREAMING_SNAKE_CASE_ = size
        SCREAMING_SNAKE_CASE_ = resample
        SCREAMING_SNAKE_CASE_ = do_center_crop
        SCREAMING_SNAKE_CASE_ = crop_size
        SCREAMING_SNAKE_CASE_ = do_rescale
        SCREAMING_SNAKE_CASE_ = rescale_factor
        SCREAMING_SNAKE_CASE_ = do_normalize
        SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def a__ ( self, _lowercase, _lowercase, _lowercase = PILImageResampling.BICUBIC, _lowercase = None, **_lowercase, ) -> np.ndarray:
        # Resize so the shortest edge matches ``size['shortest_edge']``,
        # preserving aspect ratio.
        SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, default_to_square=_lowercase )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(_lowercase, size=size['shortest_edge'], default_to_square=_lowercase )
        return resize(_lowercase, size=_lowercase, resample=_lowercase, data_format=_lowercase, **_lowercase )

    def a__ ( self, _lowercase, _lowercase, _lowercase = None, **_lowercase, ) -> np.ndarray:
        # Center-crop to ``size['height']`` x ``size['width']``.
        SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(_lowercase, size=(size['height'], size['width']), data_format=_lowercase, **_lowercase )

    def a__ ( self, _lowercase, _lowercase, _lowercase = None, **_lowercase ) -> np.ndarray:
        # Multiply pixel values by a scale factor (typically 1/255).
        return rescale(_lowercase, scale=_lowercase, data_format=_lowercase, **_lowercase )

    def a__ ( self, _lowercase, _lowercase, _lowercase, _lowercase = None, **_lowercase, ) -> np.ndarray:
        # Normalize with per-channel mean and std.
        return normalize(_lowercase, mean=_lowercase, std=_lowercase, data_format=_lowercase, **_lowercase )

    def a__ ( self, _lowercase, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = ChannelDimension.FIRST, **_lowercase, ) -> Tuple:
        # Full preprocessing pipeline; per-call arguments override the
        # instance defaults set in ``__init__``.
        SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
        SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, default_to_square=_lowercase )
        SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop
        SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size
        SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, param_name='crop_size' )
        SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE_ = make_list_of_images(_lowercase )
        if not valid_images(_lowercase ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE_ = [to_numpy_array(_lowercase ) for image in images]
        if do_resize:
            SCREAMING_SNAKE_CASE_ = [self.resize(image=_lowercase, size=_lowercase, resample=_lowercase ) for image in images]
        if do_center_crop:
            SCREAMING_SNAKE_CASE_ = [self.center_crop(image=_lowercase, size=_lowercase ) for image in images]
        if do_rescale:
            SCREAMING_SNAKE_CASE_ = [self.rescale(image=_lowercase, scale=_lowercase ) for image in images]
        if do_normalize:
            SCREAMING_SNAKE_CASE_ = [self.normalize(image=_lowercase, mean=_lowercase, std=_lowercase ) for image in images]
        SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_lowercase, _lowercase ) for image in images]
        SCREAMING_SNAKE_CASE_ = {'pixel_values': images}
        return BatchFeature(data=_lowercase, tensor_type=_lowercase )

    def a__ ( self, _lowercase, _lowercase = None ) -> Dict:
        # Turn model logits into per-image semantic segmentation maps,
        # optionally bilinearly resized to the given target sizes.
        SCREAMING_SNAKE_CASE_ = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(_lowercase ) != len(_lowercase ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(_lowercase ):
                SCREAMING_SNAKE_CASE_ = target_sizes.numpy()
            SCREAMING_SNAKE_CASE_ = []
            for idx in range(len(_lowercase ) ):
                SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode='bilinear', align_corners=_lowercase )
                SCREAMING_SNAKE_CASE_ = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(_lowercase )
        else:
            SCREAMING_SNAKE_CASE_ = logits.argmax(dim=1 )
            SCREAMING_SNAKE_CASE_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
"""Sum of both diagonals of an odd-sized number spiral (Project Euler 28).

Fixes applied to the corrupted original: the function was defined as
``_UpperCamelCase`` while the ``__main__`` block called ``solution``, and the
loop body referenced ``total``/``odd``/``even`` that were never bound (every
assignment target had been mangled to one placeholder name).
"""
from math import ceil


def solution(n: int = 1001) -> int:
    """Return the diagonal sum of an ``n`` x ``n`` number spiral (``n`` odd).

    Layer ``i`` (counting from the centre) has corners ``(2i+1)**2``,
    ``(2i+1)**2 - 2i``, ``(2i+1)**2 - 4i`` and ``(2i+1)**2 - 6i``, which sum
    to ``4*(2i+1)**2 - 6*(2i)``; the centre contributes 1.

    >>> solution(1)
    1
    >>> solution(5)
    101
    """
    total = 1  # the centre of the spiral
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total += 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
238
1
"""Consolidate a question-encoder checkpoint and a generator checkpoint into a
single RAG checkpoint (model + both tokenizers) under one destination directory."""
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    """Build a RAG model from its two sub-models and save everything to ``dest_dir``.

    Args:
        model_type: ``"rag_token"`` or ``"rag_sequence"`` — selects the model class
            and the default base config.
        generator_name_or_path: identifier of the generator sub-model.
        question_encoder_name_or_path: identifier of the question-encoder sub-model.
        dest_dir: ``pathlib.Path`` output directory.
        config_name_or_path: optional RAG config; defaults to the matching
            ``facebook/rag-*-base`` config for ``model_type``.
        generator_tokenizer_name_or_path: defaults to ``generator_name_or_path``.
        question_encoder_tokenizer_name_or_path: defaults to
            ``question_encoder_name_or_path``.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model: graft the two sub-model configs onto the RAG config before
    # instantiating, so the combined checkpoint is self-describing.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check: the consolidated checkpoint must load back cleanly.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
500
"""Project Euler Problem 234: Semidivisible numbers.

For n with p**2 < n < q**2 (p, q consecutive primes), n is semidivisible if
exactly one of p and q divides n.  ``solution`` sums all semidivisible
numbers not exceeding the limit.
"""
import math


def prime_sieve(n):
    """Return a list of all primes strictly below ``n`` (n > 2).

    Classic sieve: even numbers other than 2 are never scanned; odd multiples
    are struck out in place.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit=999_966_663_333):
    """Sum of all semidivisible numbers <= ``limit``.

    For each consecutive prime pair (p, q) with p**2 <= limit, adds the
    numbers strictly between p**2 and q**2 (capped at ``limit``) that are
    divisible by p or by q, then subtracts (twice) those divisible by both.
    """
    # Primes up to sqrt(limit) plus a safety margin so primes[prime_index + 1]
    # always exists for the last pair considered.
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Add the numbers divisible by lps (= last_prime).
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Pull the upper bound back under the limit for the ups side.
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups (= next_prime), walking downwards.
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps — they were added
        # once by each side above but are not semidivisible.
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Advance to the first multiple of p*q past the lower bound.
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Subtract twice since it was added by both the ups and lps passes.
            matches_sum -= current * 2

            current += last_prime * next_prime

        # Setup for the next consecutive prime pair.
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
500
1
"""Tests for the Chinese-CLIP image processor."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Holds the image-processor configuration under test and builds random inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
    ):
        # Mutable defaults (lists/dicts) are resolved here instead of in the
        # signature so instances never share state.
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
        image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create ``batch_size`` random channels-first images.

        Returned as PIL images by default, numpy arrays when ``numpify`` is
        set, or torch tensors when ``torchify`` is set.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for _ in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for _ in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): the original argument value was unrecoverable from the
        # mangled source; True matches the tester's own default — confirm.
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGBA inputs are converted to RGB by the processor, so 3 channels out.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
418
"""Utility for reversing the word order of a sentence."""


def __snake_case(input_str: str) -> str:
    """Return ``input_str`` with its whitespace-separated words in reverse order.

    >>> __snake_case("hello world")
    'world hello'
    >>> __snake_case("")
    ''
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
418
1
"""Lazy import structure for the Informer model (transformers-style __init__)."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names; consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
653
"""Tests for ``datasets.parallel``'s joblib-spark backend integration."""
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    """Trivial picklable function mapped over the inputs in the tests below."""
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_checks():
    """The context manager exposes its backend name and rejects unknown ones."""
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    """map_nested under the spark backend handles lists, dicts and nesting."""
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
653
1
"""Root finding with the secant (intersection) method."""
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Return an approximate root of ``function`` using the secant method.

    Iterates x_{n+1} = x_n - f(x_n) * (x_n - x_{n-1}) / (f(x_n) - f(x_{n-1}))
    starting from the two guesses ``x0`` and ``x1`` until two successive
    iterates differ by less than 1e-5.

    Raises:
        ZeroDivisionError: if the two current points coincide or have equal
            function values, so the secant slope is zero and no step exists.
    """
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        # Converged: successive iterates are closer than the tolerance.
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Example polynomial: x**3 - 2*x - 5 (real root near 2.0945515)."""
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
704
"""Exact-match metric: percentage of predictions equal to their reference."""
import re
import string

import numpy as np

import datasets


_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"

_CITATION = "\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    """Computes the percentage of predictions that exactly match their references."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        # Strip the ignored regex patterns first, then apply the normalisation
        # flags, so the flags act on the already-cleaned strings.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
391
0
"""Lazy import structure for the ConvNext model (transformers-style __init__)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> public names; consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
21
"""Lazy import structure for the BridgeTower model (transformers-style __init__)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> public names; consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
201
0
"""Integer exponentiation by repeated squaring, supporting negative exponents."""


def actual_power(a: int, b: int) -> int:
    """Return ``a ** b`` for non-negative ``b`` via recursive squaring.

    The half-power is computed once and squared (the original computed the
    same recursive call twice, making the recursion exponential in depth).
    """
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Return ``a ** b`` for any integer exponent.

    Negative exponents yield the float reciprocal: ``1 / a ** |b|``.
    """
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
218
from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowerCamelCase : Optional[torch.FloatTensor] =None lowerCamelCase : torch.FloatTensor =None lowerCamelCase : Optional[Tuple[torch.FloatTensor]] =None lowerCamelCase : Optional[Tuple[torch.FloatTensor]] =None class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=5_12 , lowerCAmelCase : int="cls" , lowerCAmelCase : int=False , lowerCAmelCase : Optional[int]=True , **lowerCAmelCase : int , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase ) __lowerCAmelCase : Dict = project_dim __lowerCAmelCase : Dict = pooler_fn __lowerCAmelCase : Any = learn_encoder __lowerCAmelCase : Optional[Any] = use_attention_mask class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowerCamelCase : Tuple =[R"pooler", R"logit_scale"] lowerCamelCase : List[str] =[R"position_ids", R"predictions.decoder.bias"] lowerCamelCase : List[Any] ="roberta" lowerCamelCase : List[str] =RobertaSeriesConfig def __init__( self : Dict , lowerCAmelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" super().__init__(lowerCAmelCase ) __lowerCAmelCase : Any = XLMRobertaModel(lowerCAmelCase ) __lowerCAmelCase : int = nn.Linear(config.hidden_size , config.project_dim ) __lowerCAmelCase : Union[str, Any] = getattr(lowerCAmelCase , """has_pre_transformation""" , lowerCAmelCase ) if self.has_pre_transformation: __lowerCAmelCase : Dict = nn.Linear(config.hidden_size , config.project_dim ) __lowerCAmelCase : Optional[int] = 
nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[bool] = None , ) -> int: """simple docstring""" __lowerCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict __lowerCAmelCase : int = self.base_model( input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , position_ids=lowerCAmelCase , head_mask=lowerCAmelCase , inputs_embeds=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , output_attentions=lowerCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=lowerCAmelCase , ) if self.has_pre_transformation: __lowerCAmelCase : Union[str, Any] = outputs["""hidden_states"""][-2] __lowerCAmelCase : str = self.pre_LN(lowerCAmelCase ) __lowerCAmelCase : str = self.transformation_pre(lowerCAmelCase ) return TransformationModelOutput( projection_state=lowerCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __lowerCAmelCase : Any = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=lowerCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
218
1
from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "Visual-Attention-Network/van-base": ( "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" ), } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Optional[int] = "van" def __init__( self , _lowerCAmelCase=2_2_4 , _lowerCAmelCase=3 , _lowerCAmelCase=[7, 3, 3, 3] , _lowerCAmelCase=[4, 2, 2, 2] , _lowerCAmelCase=[6_4, 1_2_8, 3_2_0, 5_1_2] , _lowerCAmelCase=[3, 3, 1_2, 3] , _lowerCAmelCase=[8, 8, 4, 4] , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-6 , _lowerCAmelCase=1E-2 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , **_lowerCAmelCase , ): super().__init__(**_lowerCAmelCase ) _lowercase : Tuple = image_size _lowercase : Dict = num_channels _lowercase : Any = patch_sizes _lowercase : List[str] = strides _lowercase : List[Any] = hidden_sizes _lowercase : Optional[int] = depths _lowercase : Dict = mlp_ratios _lowercase : Dict = hidden_act _lowercase : List[str] = initializer_range _lowercase : Optional[Any] = layer_norm_eps _lowercase : Optional[int] = layer_scale_init_value _lowercase : Tuple = drop_path_rate _lowercase : Any = dropout_rate
66
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') lowerCamelCase__ : str = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : __a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}) __a : Optional[str] = field( default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) __a : bool = field( default=__a , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) __a : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) __a : bool = field( default=__a , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) @dataclass class _UpperCAmelCase : __a : Optional[str] = field(default=__a , 
metadata={"""help""": """The input training data file (a text file)."""}) __a : Optional[str] = field( default=__a , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) __a : bool = field( default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""}) __a : Optional[int] = field( default=__a , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) __a : Optional[int] = field( default=__a , metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __a : bool = field( default=__a , metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """ """efficient on GPU but very bad for TPU.""" ) } , ) __a : Optional[int] = field( default=__a , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) __a : Optional[int] = field( default=__a , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def __snake_case ( self ) -> Union[str, Any]: '''simple docstring''' if self.train_file is not None: _UpperCAmelCase : Any = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: _UpperCAmelCase : int = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
@dataclass class _UpperCAmelCase : __a : PreTrainedTokenizerBase __a : Union[bool, str, PaddingStrategy] = True __a : Optional[int] = None __a : Optional[int] = None def __call__( self , _A ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = """label""" if """label""" in features[0].keys() else """labels""" _UpperCAmelCase : Any = [feature.pop(_A ) for feature in features] _UpperCAmelCase : Optional[int] = len(_A ) _UpperCAmelCase : Union[str, Any] = len(features[0]["""input_ids"""] ) _UpperCAmelCase : str = [ [{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features ] _UpperCAmelCase : List[str] = list(chain(*_A ) ) _UpperCAmelCase : int = self.tokenizer.pad( _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) # Un-flatten _UpperCAmelCase : Optional[int] = {k: v.view(_A , _A , -1 ) for k, v in batch.items()} # Add back labels _UpperCAmelCase : Any = torch.tensor(_A , dtype=torch.intaa ) return batch def UpperCamelCase ( ) -> Dict: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("""run_swag""", _lowerCAmelCase, _lowerCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(_lowerCAmelCase ) datasets.utils.logging.set_verbosity(_lowerCAmelCase ) transformers.utils.logging.set_verbosity(_lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _UpperCAmelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _UpperCAmelCase : Dict = {} if data_args.train_file is not None: _UpperCAmelCase : Optional[int] = data_args.train_file if data_args.validation_file is not None: _UpperCAmelCase : Optional[int] = data_args.validation_file _UpperCAmelCase : int = data_args.train_file.split(""".""" )[-1] _UpperCAmelCase : List[Any] = load_dataset( _lowerCAmelCase, data_files=_lowerCAmelCase, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Downloading and loading the swag dataset from the hub. _UpperCAmelCase : List[str] = load_dataset( """swag""", """regular""", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : Any = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : List[str] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=_lowerCAmelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _UpperCAmelCase : str = [f'''ending{i}''' for i in range(4 )] _UpperCAmelCase : List[Any] = """sent1""" _UpperCAmelCase : str = """sent2""" if data_args.max_seq_length is None: _UpperCAmelCase : Tuple = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( """The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value""" """ of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can""" """ override this default with `--block_size xxx`.""" ) _UpperCAmelCase : List[str] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) _UpperCAmelCase : Tuple = min(data_args.max_seq_length, tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(_lowerCAmelCase : str ): _UpperCAmelCase : int = [[context] * 4 for context in examples[context_name]] _UpperCAmelCase : List[Any] = examples[question_header_name] _UpperCAmelCase : List[str] = [ [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(_lowerCAmelCase ) ] # Flatten out _UpperCAmelCase : Any = list(chain(*_lowerCAmelCase ) ) _UpperCAmelCase : Any = list(chain(*_lowerCAmelCase ) ) # Tokenize _UpperCAmelCase : Dict = tokenizer( _lowerCAmelCase, _lowerCAmelCase, truncation=_lowerCAmelCase, max_length=_lowerCAmelCase, padding="""max_length""" if data_args.pad_to_max_length else False, ) # Un-flatten return {k: [v[i : i + 4] for i in range(0, len(_lowerCAmelCase ), 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) _UpperCAmelCase : Any = raw_datasets["""train"""] if data_args.max_train_samples is not None: _UpperCAmelCase : List[Any] = min(len(_lowerCAmelCase ), data_args.max_train_samples ) _UpperCAmelCase : List[Any] = train_dataset.select(range(_lowerCAmelCase ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): _UpperCAmelCase : List[Any] = train_dataset.map( _lowerCAmelCase, batched=_lowerCAmelCase, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) _UpperCAmelCase : Optional[Any] = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: _UpperCAmelCase : int = min(len(_lowerCAmelCase ), data_args.max_eval_samples ) _UpperCAmelCase : List[str] = eval_dataset.select(range(_lowerCAmelCase ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): _UpperCAmelCase : List[str] = eval_dataset.map( _lowerCAmelCase, 
batched=_lowerCAmelCase, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator _UpperCAmelCase : List[Any] = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=_lowerCAmelCase, pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(_lowerCAmelCase : List[Any] ): _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = eval_predictions _UpperCAmelCase : Dict = np.argmax(_lowerCAmelCase, axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _UpperCAmelCase : Union[str, Any] = Trainer( model=_lowerCAmelCase, args=_lowerCAmelCase, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=_lowerCAmelCase, data_collator=_lowerCAmelCase, compute_metrics=_lowerCAmelCase, ) # Training if training_args.do_train: _UpperCAmelCase : Dict = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : Optional[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : int = last_checkpoint _UpperCAmelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=_lowerCAmelCase ) trainer.save_model() # Saves the tokenizer too for easy upload _UpperCAmelCase : List[Any] = train_result.metrics _UpperCAmelCase : Dict = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase ) ) _UpperCAmelCase : List[Any] = min(_lowerCAmelCase, len(_lowerCAmelCase ) ) trainer.log_metrics("""train""", _lowerCAmelCase ) trainer.save_metrics("""train""", _lowerCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _UpperCAmelCase : Optional[Any] = trainer.evaluate() _UpperCAmelCase : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase ) 
_UpperCAmelCase : Optional[int] = min(_lowerCAmelCase, len(_lowerCAmelCase ) ) trainer.log_metrics("""eval""", _lowerCAmelCase ) trainer.save_metrics("""eval""", _lowerCAmelCase ) _UpperCAmelCase : int = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """multiple-choice""", """dataset_tags""": """swag""", """dataset_args""": """regular""", """dataset""": """SWAG""", """language""": """en""", } if training_args.push_to_hub: trainer.push_to_hub(**_lowerCAmelCase ) else: trainer.create_model_card(**_lowerCAmelCase ) def UpperCamelCase ( _lowerCAmelCase : Tuple ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
238
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=snake_case_ ): """simple docstring""" __magic_name__ = ["""torch""", """torchsde"""] def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> Any: requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[str]: requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[str]: requires_backends(cls , ['''torch''', '''torchsde'''] )
417
'''simple docstring''' from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake __UpperCamelCase : Any = numpy.array([0, 0]) __UpperCamelCase : Optional[int] = numpy.array([0.5, 0.8_660_254]) __UpperCamelCase : int = numpy.array([1, 0]) __UpperCamelCase : Dict = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def lowercase ( lowerCAmelCase : list[numpy.ndarray] , lowerCAmelCase : int): """simple docstring""" _A : str = initial_vectors for _ in range(lowerCAmelCase): _A : Any = iteration_step(lowerCAmelCase) return vectors def lowercase ( lowerCAmelCase : list[numpy.ndarray]): """simple docstring""" _A : Any = [] for i, start_vector in enumerate(vectors[:-1]): _A : List[Any] = vectors[i + 1] new_vectors.append(lowerCAmelCase) _A : Optional[Any] = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60)) new_vectors.append(start_vector + difference_vector * 2 / 3) new_vectors.append(vectors[-1]) return new_vectors def lowercase ( lowerCAmelCase : numpy.ndarray , lowerCAmelCase : float): """simple docstring""" _A : Any = numpy.radians(lowerCAmelCase) _A , _A : str = numpy.cos(lowerCAmelCase), numpy.sin(lowerCAmelCase) _A : Dict = numpy.array(((c, -s), (s, c))) return numpy.dot(lowerCAmelCase , lowerCAmelCase) def lowercase ( lowerCAmelCase : list[numpy.ndarray]): """simple docstring""" _A : Tuple = plt.gca() axes.set_aspect('''equal''') # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() _A , _A : Any = zip(*lowerCAmelCase) plt.plot(lowerCAmelCase , lowerCAmelCase) plt.show() if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : Optional[int] = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
417
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__(self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , ): '''simple docstring''' lowerCamelCase = size if size is not None else {"height": 18, "width": 18} lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = num_channels lowerCamelCase = image_size lowerCamelCase = min_resolution lowerCamelCase = max_resolution lowerCamelCase = do_resize lowerCamelCase = size lowerCamelCase = apply_ocr def _a (self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCamelCase__ ( lowercase_ , unittest.TestCase): """simple docstring""" _A = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _a (self ): '''simple docstring''' lowerCamelCase = LayoutLMvaImageProcessingTester(self ) @property def _a (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "size" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "apply_ocr" ) ) def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) lowerCamelCase = 
self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def _a (self ): '''simple docstring''' pass def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input lowerCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , UpperCamelCase__ ) self.assertIsInstance(encoding.boxes , UpperCamelCase__ ) # Test batched lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input lowerCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input lowerCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _a (self ): '''simple docstring''' lowerCamelCase = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCamelCase = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) lowerCamelCase = Image.open(ds[0]["file"] ).convert("RGB" ) lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCamelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", 
"for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 lowerCamelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 
2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 
4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCamelCase__ ) self.assertListEqual(encoding.boxes , UpperCamelCase__ ) # with apply_OCR = False lowerCamelCase = 
LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
623
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def _UpperCAmelCase ( a : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : int , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int): '''simple docstring''' super().__init__() snake_case__ = module snake_case__ = nn.Sequential( nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__) , ) snake_case__ = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str): '''simple docstring''' return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__) + self.adapter(UpperCamelCase__) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _lowercase : Dict = '''bigscience/bloom-1b7''' # Constant values _lowercase : Any = 2.109_6595_5269_2574 _lowercase : Tuple = '''Hello my name is''' _lowercase : List[Any] = set() EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. 
I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) _lowercase : List[str] = 10 def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = AutoTokenizer.from_pretrained(self.model_name) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : str): '''simple docstring''' super().setUp() # Models and tokenizer snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""") snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") def __magic_name__ ( self : Tuple): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = self.model_abit.config self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config""")) snake_case__ = config.to_dict() snake_case__ = config.to_diff_dict() snake_case__ = config.to_json_string() def __magic_name__ ( self : Dict): '''simple docstring''' from bitsandbytes.nn import Paramsabit snake_case__ = self.model_fpaa.get_memory_footprint() snake_case__ = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE) snake_case__ = get_some_linear_layer(self.model_abit) self.assertTrue(linear.weight.__class__ == Paramsabit) def __magic_name__ ( self : Optional[int]): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(UpperCamelCase__ , torch.nn.Linear): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == 
torch.uinta) def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS) def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = BitsAndBytesConfig() snake_case__ = True snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") snake_case__ = model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS) def __magic_name__ ( self : Optional[int]): '''simple docstring''' with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(UpperCamelCase__) def __magic_name__ ( self : List[str]): '''simple docstring''' snake_case__ = BitsAndBytesConfig() with self.assertRaises(UpperCamelCase__): snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def __magic_name__ ( self : List[Any]): '''simple docstring''' with self.assertRaises(UpperCamelCase__): # Tries with `str` self.model_abit.to("""cpu""") with self.assertRaises(UpperCamelCase__): # Tries with a `dtype`` self.model_abit.to(torch.floataa) with self.assertRaises(UpperCamelCase__): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""")) with self.assertRaises(UpperCamelCase__): # Tries with a `device` self.model_abit.float() with self.assertRaises(UpperCamelCase__): # Tries with a `device` 
self.model_abit.half() # Test if we did not break anything snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") snake_case__ = self.model_fpaa.to(torch.floataa) snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) # Check this does not throw an error snake_case__ = self.model_fpaa.to("""cpu""") # Check this does not throw an error snake_case__ = self.model_fpaa.half() # Check this does not throw an error snake_case__ = self.model_fpaa.float() def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @classmethod def __magic_name__ ( cls : Optional[Any]): '''simple docstring''' snake_case__ = """t5-small""" snake_case__ = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense snake_case__ = AutoTokenizer.from_pretrained(cls.model_name) snake_case__ = """Translate in German: Hello, my dog is cute""" def __magic_name__ ( self : Optional[int]): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Any): '''simple docstring''' from transformers import TaForConditionalGeneration snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules snake_case__ = None # test with `t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) # test with `flan-t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , 
load_in_abit=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) snake_case__ = modules def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit)) snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) # test with `flan-t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : int): '''simple docstring''' super().setUp() # model_name snake_case__ = """bigscience/bloom-560m""" snake_case__ = """t5-small""" # Different types of model snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # Sequence classification model snake_case__ = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # CausalLM model snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # Seq2seq model snake_case__ = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") def __magic_name__ ( self : List[str]): '''simple docstring''' del 
self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : Tuple): '''simple docstring''' super().setUp() def __magic_name__ ( self : int): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Tuple): '''simple docstring''' snake_case__ = pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass snake_case__ = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' super().setUp() def __magic_name__ ( self : int): '''simple docstring''' snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""") # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1}) # Check that inference pass works on the model snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") # Second real batch snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) 
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = """facebook/opt-350m""" super().setUp() def __magic_name__ ( self : Any): '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""): return # Step 1: freeze all parameters snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__) self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()}) for param in model.parameters(): snake_case__ = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability snake_case__ = param.data.to(torch.floataa) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(UpperCamelCase__)): snake_case__ = LoRALayer(module.q_proj , rank=1_6) snake_case__ = LoRALayer(module.k_proj , rank=1_6) snake_case__ = LoRALayer(module.v_proj , rank=1_6) # Step 3: dummy batch snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): snake_case__ = model.forward(**UpperCamelCase__) out.logits.norm().backward() for module in model.modules(): if isinstance(UpperCamelCase__ , UpperCamelCase__): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(UpperCamelCase__ , nn.Embedding): self.assertTrue(module.weight.grad is None) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : List[Any] = '''gpt2-xl''' _lowercase : Any = 3.3191_8548_5415_2187
654
0
'''simple docstring''' from __future__ import annotations def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] ): __a : Optional[int] = [True] * limit __a : int = False __a : Any = False __a : int = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): __a : Dict = i * 2 while index < limit: __a : int = False __a : List[str] = index + i __a : Any = [2] for i in range(3 , __lowerCAmelCase , 2 ): if is_prime[i]: primes.append(__lowerCAmelCase ) return primes def __UpperCamelCase ( lowerCAmelCase__ : Tuple = 1_0_0_0_0_0_0 ): __a : List[Any] = prime_sieve(__lowerCAmelCase ) __a : Dict = 0 __a : Tuple = 0 for i in range(len(__lowerCAmelCase ) ): for j in range(i + length , len(__lowerCAmelCase ) ): __a : Tuple = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: __a : Any = j - i __a : Dict = sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
712
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class UpperCamelCase__ ( unittest.TestCase ): def __init__(self : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[int]=7 , snake_case_ : List[str]=3 , snake_case_ : List[str]=3_0 , snake_case_ : Union[str, Any]=4_0_0 , snake_case_ : Optional[Any]=True , snake_case_ : Tuple=None , snake_case_ : List[Any]=True , snake_case_ : Tuple=[0.5, 0.5, 0.5] , snake_case_ : Optional[int]=[0.5, 0.5, 0.5] , snake_case_ : Dict=True , snake_case_ : Any=1 / 2_5_5 , snake_case_ : Any=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __a : Optional[Any] = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} __a : List[Any] = parent __a : Optional[Any] = batch_size __a : int = num_channels __a : Any = min_resolution __a : Optional[Any] = max_resolution __a : List[str] = do_resize __a : Optional[int] = size __a : Dict = do_normalize __a : Any = image_mean __a : Tuple = image_std __a : Union[str, Any] = do_rescale __a : Union[str, Any] = rescale_factor __a : List[Any] = do_pad def lowerCAmelCase (self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCAmelCase (self : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any]=False ): if not batched: __a : str = image_inputs[0] if isinstance(snake_case_ , Image.Image ): __a , __a : 
Tuple = image.size else: __a , __a : Tuple = image.shape[1], image.shape[2] if w < h: __a : int = int(self.size['''shortest_edge'''] * h / w ) __a : Any = self.size['''shortest_edge'''] elif w > h: __a : Tuple = self.size['''shortest_edge'''] __a : int = int(self.size['''shortest_edge'''] * w / h ) else: __a : List[Any] = self.size['''shortest_edge'''] __a : Dict = self.size['''shortest_edge'''] else: __a : Union[str, Any] = [] for image in image_inputs: __a , __a : List[str] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __a : Union[str, Any] = max(snake_case_ , key=lambda snake_case_ : item[0] )[0] __a : Any = max(snake_case_ , key=lambda snake_case_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase__ ( __lowercase ,unittest.TestCase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = YolosImageProcessor if is_vision_available() else None def lowerCAmelCase (self : Any ): __a : Any = YolosImageProcessingTester(self ) @property def lowerCAmelCase (self : int ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase (self : Optional[int] ): __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case_ , '''image_mean''' ) ) self.assertTrue(hasattr(snake_case_ , '''image_std''' ) ) self.assertTrue(hasattr(snake_case_ , '''do_normalize''' ) ) self.assertTrue(hasattr(snake_case_ , '''do_resize''' ) ) self.assertTrue(hasattr(snake_case_ , '''size''' ) ) def lowerCAmelCase (self : Union[str, Any] ): __a : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} ) self.assertEqual(image_processor.do_pad , snake_case_ ) __a : int = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=snake_case_ ) 
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} ) self.assertEqual(image_processor.do_pad , snake_case_ ) def lowerCAmelCase (self : str ): pass def lowerCAmelCase (self : Optional[Any] ): # Initialize image_processing __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , Image.Image ) # Test not batched input __a : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __a , __a : int = self.image_processor_tester.get_expected_values(snake_case_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a , __a : List[str] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ ) __a : str = image_processing(snake_case_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase (self : str ): # Initialize image_processing __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , np.ndarray ) # Test not batched input __a : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __a , __a : Dict = self.image_processor_tester.get_expected_values(snake_case_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : int = image_processing(snake_case_ , return_tensors='''pt''' 
).pixel_values __a , __a : List[str] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase (self : Optional[Any] ): # Initialize image_processing __a : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , torch.Tensor ) # Test not batched input __a : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __a , __a : Optional[Any] = self.image_processor_tester.get_expected_values(snake_case_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : Tuple = image_processing(snake_case_ , return_tensors='''pt''' ).pixel_values __a , __a : Union[str, Any] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase (self : Any ): # Initialize image_processings __a : Any = self.image_processing_class(**self.image_processor_dict ) __a : str = self.image_processing_class(do_resize=snake_case_ , do_normalize=snake_case_ , do_rescale=snake_case_ ) # create random PyTorch tensors __a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors __a : List[Any] = image_processing_a.pad(snake_case_ , 
return_tensors='''pt''' ) __a : Union[str, Any] = image_processing_a(snake_case_ , return_tensors='''pt''' ) self.assertTrue( torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) ) @slow def lowerCAmelCase (self : List[str] ): # prepare image and target __a : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: __a : str = json.loads(f.read() ) __a : Dict = {'''image_id''': 3_9_7_6_9, '''annotations''': target} # encode them __a : Optional[Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' ) __a : Tuple = image_processing(images=snake_case_ , annotations=snake_case_ , return_tensors='''pt''' ) # verify pixel values __a : int = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape , snake_case_ ) __a : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) ) # verify area __a : int = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , snake_case_ ) ) # verify boxes __a : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , snake_case_ ) __a : List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , snake_case_ , atol=1E-3 ) ) # verify image_id __a : Optional[Any] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , snake_case_ ) ) # verify is_crowd __a : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , snake_case_ ) ) # verify class_labels __a : Optional[Any] = torch.tensor([7_5, 7_5, 6_3, 
6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , snake_case_ ) ) # verify orig_size __a : Optional[int] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , snake_case_ ) ) # verify size __a : Optional[Any] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , snake_case_ ) ) @slow def lowerCAmelCase (self : Optional[int] ): # prepare image, target and masks_path __a : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: __a : int = json.loads(f.read() ) __a : Optional[int] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target} __a : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them __a : Any = YolosImageProcessor(format='''coco_panoptic''' ) __a : Any = image_processing(images=snake_case_ , annotations=snake_case_ , masks_path=snake_case_ , return_tensors='''pt''' ) # verify pixel values __a : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape , snake_case_ ) __a : str = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) ) # verify area __a : Optional[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , snake_case_ ) ) # verify boxes __a : int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , snake_case_ ) __a : Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , snake_case_ , atol=1E-3 ) ) # verify image_id __a : Dict = 
torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , snake_case_ ) ) # verify is_crowd __a : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , snake_case_ ) ) # verify class_labels __a : Any = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , snake_case_ ) ) # verify masks __a : Tuple = 8_2_2_8_7_3 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , snake_case_ ) # verify orig_size __a : Any = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , snake_case_ ) ) # verify size __a : List[str] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , snake_case_ ) )
326
0
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class lowerCamelCase_: '''simple docstring''' def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=True , lowerCamelCase__=None , ): _lowerCamelCase = parent _lowerCamelCase = batch_size _lowerCamelCase = seq_length _lowerCamelCase = is_training _lowerCamelCase = use_input_mask _lowerCamelCase = vocab_size _lowerCamelCase = hidden_size _lowerCamelCase = num_hidden_layers _lowerCamelCase = num_attention_heads _lowerCamelCase = intermediate_size _lowerCamelCase = hidden_act _lowerCamelCase = hidden_dropout_prob _lowerCamelCase = attention_probs_dropout_prob _lowerCamelCase = max_position_embeddings _lowerCamelCase = initializer_range _lowerCamelCase = use_labels _lowerCamelCase = scope def snake_case__ ( self ): _lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase = None if self.use_input_mask: _lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: _lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase = self.get_config() return config, input_ids, input_mask, 
token_labels def snake_case__ ( self ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def snake_case__ ( self ): ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) = self.prepare_config_and_inputs() _lowerCamelCase = True _lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ , ): _lowerCamelCase = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) _lowerCamelCase = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ , ): _lowerCamelCase = True _lowerCamelCase = BertGenerationEncoder(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowerCamelCase = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , ) _lowerCamelCase = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , 
encoder_hidden_states=lowerCamelCase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ , ): _lowerCamelCase = True _lowerCamelCase = True _lowerCamelCase = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval() # first forward pass _lowerCamelCase = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ , ) _lowerCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) _lowerCamelCase = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )['''hidden_states'''][0] _lowerCamelCase = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )['''hidden_states'''][0] # select random slice _lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() _lowerCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase__ , 
lowerCamelCase__ , atol=1e-3 ) ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , ): _lowerCamelCase = BertGenerationDecoder(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self ): _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.prepare_config_and_inputs() _lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase_( A__, A__, A__, unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase__ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else () lowercase__ : List[Any] = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def snake_case__ ( self ): _lowerCamelCase = BertGenerationEncoderTester(self ) _lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=3_7 ) def snake_case__ ( self ): self.config_tester.run_common_tests() def snake_case__ ( self ): _lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def snake_case__ ( self ): _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs() _lowerCamelCase = '''bert''' self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def snake_case__ ( self ): _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ ) def 
snake_case__ ( self ): _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ ) def snake_case__ ( self ): # This regression test was failing with PyTorch < 1.3 ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() _lowerCamelCase = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) def snake_case__ ( self ): _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ ) @slow def snake_case__ ( self ): _lowerCamelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch class lowerCamelCase_( unittest.TestCase ): '''simple docstring''' @slow def snake_case__ ( self ): _lowerCamelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) _lowerCamelCase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): _lowerCamelCase = model(lowerCamelCase__ )[0] _lowerCamelCase = torch.Size([1, 8, 1_0_2_4] ) self.assertEqual(output.shape , lowerCamelCase__ ) _lowerCamelCase = torch.tensor( [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) ) @require_torch class lowerCamelCase_( unittest.TestCase ): '''simple docstring''' @slow def snake_case__ ( self ): _lowerCamelCase = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) _lowerCamelCase = 
torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): _lowerCamelCase = model(lowerCamelCase__ )[0] _lowerCamelCase = torch.Size([1, 8, 5_0_3_5_8] ) self.assertEqual(output.shape , lowerCamelCase__ ) _lowerCamelCase = torch.tensor( [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
661
"""simple docstring""" from __future__ import annotations from math import ceil, floor, sqrt def lowerCAmelCase_( lowercase_ : int = 2_00_00_00 ) -> int: _lowerCamelCase = [0] _lowerCamelCase = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target _lowerCamelCase = 0 # the area corresponding to the grid that gives the product closest to target _lowerCamelCase = 0 # an estimate of b, using the quadratic formula _lowerCamelCase = 42 # the largest integer less than b_estimate _lowerCamelCase = 42 # the largest integer less than b_estimate _lowerCamelCase = 42 # the triangle number corresponding to b_floor _lowerCamelCase = 42 # the triangle number corresponding to b_ceil _lowerCamelCase = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): _lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 _lowerCamelCase = floor(lowercase_ ) _lowerCamelCase = ceil(lowercase_ ) _lowerCamelCase = triangle_numbers[b_floor] _lowerCamelCase = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): _lowerCamelCase = triangle_b_first_guess * triangle_a _lowerCamelCase = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): _lowerCamelCase = triangle_b_second_guess * triangle_a _lowerCamelCase = idx_a * b_ceil return area if __name__ == "__main__": print(F"""{solution() = }""")
661
1
"""Convert a text-to-image unCLIP (Karlo) pipeline into an image-variation pipeline.

Reuses the decoder / super-resolution stack of a pretrained txt2img unCLIP
checkpoint and pairs it with a CLIP image encoder, then saves the resulting
``UnCLIPImageVariationPipeline`` to ``--dump_path``.
"""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    # NOTE: argparse maps ``--txt2img_unclip`` to ``args.txt2img_unclip``;
    # the previous code read a non-existent attribute and crashed.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    # Rewire the txt2img components into the image-variation pipeline.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
710
"""Accelerate example: BERT fine-tuning on GLUE MRPC with LocalSGD.

LocalSGD synchronizes model parameters every K batches; it is different from,
but complementary to, gradient accumulation. Works on single/multi CPU or GPU,
TPU, and in fp16/bf16/fp32; run via the accelerate examples README.
"""
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the ``Accelerator`` (used for process ordering and to pick
            TPU-friendly padding).
        batch_size: per-device training batch size.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Tokenize on the main process first so the cached result is reused by the others.
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # Rename 'label' to 'labels', the name expected by transformers models.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only: swap in mocked dataloaders when the CI environment asks for it.
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate the model once per epoch, syncing via LocalSGD.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI arguments (mixed precision, accumulation, LocalSGD steps, cpu).
    """
    # For testing only: shorten the run.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything: there is no specific order to remember, we just need to
    # unpack the objects in the same order we gave them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # We use the `accumulate` context manager to perform gradient accumulation.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI arguments and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
35
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
559
"""Scrape the current stock price of a symbol from Yahoo Finance India."""
import requests
from bs4 import BeautifulSoup  # fixed: ``bsa`` is not a real package


def lowerCAmelCase(__lowerCamelCase="AAPL"):
    """Return the displayed price string for *__lowerCamelCase* (a ticker symbol).

    Scrapes the quote page, so this performs a network request; the CSS class
    below is Yahoo's current markup and may break if the site changes.
    """
    url = f'''https://in.finance.yahoo.com/quote/{__lowerCamelCase}?s={__lowerCamelCase}'''
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        # fixed: ``stock_price`` was never defined; the scraper above is the entry point
        print(f'''Current {symbol:<4} stock price is {lowerCAmelCase(symbol):>8}''')
559
1
__UpperCAmelCase = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] __UpperCAmelCase = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] __UpperCAmelCase = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] __UpperCAmelCase = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] __UpperCAmelCase = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 
176, 175, 132, 131, 88, 44, 0, ] __UpperCAmelCase = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] __UpperCAmelCase = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] __UpperCAmelCase = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
218
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SCREAMING_SNAKE_CASE(Pipeline):
    """Visual question answering pipeline: answers a free-text question about an image.

    Tokenizes the question, extracts image features, runs the model, and returns
    the ``top_k`` label/score pairs from the sigmoid of the logits.

    NOTE(review): the previous version gave every hook method the same name and
    reused one parameter name several times per signature (a SyntaxError); the
    standard ``Pipeline`` hook names are restored here so the base class can
    dispatch ``preprocess`` / ``_forward`` / ``postprocess``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict usage to models registered for visual question answering.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Split user kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """Answer *question* about *image*.

        ``image`` may also be a dict (or list of dicts) already holding
        ``{"image": ..., "question": ...}``.
        """
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the caller passed pre-assembled {"image", "question"} payload(s).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        """Tokenize the question and extract pixel features, merged into one dict."""
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the underlying model on the merged inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Return the ``top_k`` answers as ``[{"score": ..., "answer": ...}, ...]``."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            # Multi-label style scoring: sigmoid per label, then take the top-k.
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
218
1
from __future__ import annotations def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ): # This function is recursive lowerCamelCase_ : Dict = len(lowerCAmelCase__ ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else lowerCamelCase_ : Dict = array[0] lowerCamelCase_ : Optional[Any] = False lowerCamelCase_ : str = 1 lowerCamelCase_ : list[int] = [] while not is_found and i < array_length: if array[i] < pivot: lowerCamelCase_ : List[Any] = True lowerCamelCase_ : List[Any] = [element for element in array[i:] if element >= array[i]] lowerCamelCase_ : Optional[int] = longest_subsequence(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > len(lowerCAmelCase__ ): lowerCamelCase_ : int = temp_array else: i += 1 lowerCamelCase_ : Optional[int] = [element for element in array[1:] if element >= pivot] lowerCamelCase_ : Dict = [pivot, *longest_subsequence(lowerCAmelCase__ )] if len(lowerCAmelCase__ ) > len(lowerCAmelCase__ ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
364
"""torch.hub entry points for transformers (hubconf-style module).

Adds the local ``src`` directory to ``sys.path`` so the bundled library is
importable, declares hub ``dependencies``, and exposes one thin
``from_pretrained`` wrapper per Auto class.

NOTE(review): the previous version declared every wrapper as
``def f(*x, **x)`` (duplicate argument name -> SyntaxError), reused one module
name for both the path constant and the dependency list, and gave all seven
entry points the same name so each shadowed the previous one. Distinct
conventional names are restored; ``sys.path.append(SRC_DIR)`` in the original
confirms the intended constant name.
"""
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# Packages torch.hub must install before loading these entry points.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
364
1
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class __SCREAMING_SNAKE_CASE(FlaxModelTesterMixin, unittest.TestCase):
    """Flax model-tester suite for ``FlaxAutoencoderKL``.

    NOTE(review): the previous version inherited the class from its own (not yet
    defined) name — a NameError at class creation. The imported
    ``FlaxModelTesterMixin`` is the intended base, and the mixin's expected hook
    names (``model_class``, ``dummy_input``,
    ``prepare_init_args_and_inputs_for_common``) are restored.
    """

    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """A small random (4, 3, 32, 32) sample plus the PRNG key used to draw it."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        """Return the constructor kwargs and forward inputs for the common tests."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
214
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    """Image processor: shortest-edge resize (256), center crop (224x224),
    rescale to [0, 1] and ImageNet-standard normalization.

    Reconstructed: the original repeated the parameter name ``_a`` in every
    signature (a SyntaxError) and rebound all locals to one shared name.
    """

    # Name read by the processing pipeline to route outputs.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        # shortest-edge sizing is non-square; assumed default_to_square=False
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize with ``mean``/``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured transform pipeline over one image or a batch."""
        # Per-call overrides fall back to the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
214
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { "edbeeching/decision-transformer-gym-hopper-medium": ( "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : List[Any] = '''decision_transformer''' __lowercase : str = ['''past_key_values'''] __lowercase : Any = { '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , lowerCAmelCase__=1_7 , lowerCAmelCase__=4 , lowerCAmelCase__=1_2_8 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__=True , lowerCAmelCase__=1 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=3 , lowerCAmelCase__=1 , lowerCAmelCase__=None , lowerCAmelCase__="relu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=5_0_2_5_6 , lowerCAmelCase__=5_0_2_5_6 , lowerCAmelCase__=False , lowerCAmelCase__=False , **lowerCAmelCase__ , ): __SCREAMING_SNAKE_CASE = state_dim __SCREAMING_SNAKE_CASE = act_dim __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = max_ep_len __SCREAMING_SNAKE_CASE = action_tanh __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = n_positions __SCREAMING_SNAKE_CASE = n_layer __SCREAMING_SNAKE_CASE = n_head __SCREAMING_SNAKE_CASE = n_inner __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = resid_pdrop __SCREAMING_SNAKE_CASE = embd_pdrop __SCREAMING_SNAKE_CASE = attn_pdrop __SCREAMING_SNAKE_CASE = layer_norm_epsilon __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = scale_attn_weights __SCREAMING_SNAKE_CASE = use_cache 
__SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx __SCREAMING_SNAKE_CASE = reorder_and_upcast_attn __SCREAMING_SNAKE_CASE = bos_token_id __SCREAMING_SNAKE_CASE = eos_token_id super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__)
155
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def _lowerCAmelCase ( UpperCamelCase_ ): return np.dot(UpperCamelCase_ , UpperCamelCase_ ) class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self , *, lowerCAmelCase__ = np.inf , lowerCAmelCase__ = "linear" , lowerCAmelCase__ = 0.0 , ): __SCREAMING_SNAKE_CASE = regularization __SCREAMING_SNAKE_CASE = gamma if kernel == "linear": __SCREAMING_SNAKE_CASE = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("""rbf kernel requires gamma""") if not isinstance(self.gamma , (float, int)): raise ValueError("""gamma must be float or int""") if not self.gamma > 0: raise ValueError("""gamma must be > 0""") __SCREAMING_SNAKE_CASE = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: __SCREAMING_SNAKE_CASE = f"Unknown kernel: {kernel}" raise ValueError(lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): return np.dot(lowerCAmelCase__ , lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = observations __SCREAMING_SNAKE_CASE = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . 
xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((__SCREAMING_SNAKE_CASE) ,) = np.shape(lowerCAmelCase__) def to_minimize(lowerCAmelCase__) -> float: __SCREAMING_SNAKE_CASE = 0 ((__SCREAMING_SNAKE_CASE) ,) = np.shape(lowerCAmelCase__) for i in range(lowerCAmelCase__): for j in range(lowerCAmelCase__): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = LinearConstraint(lowerCAmelCase__ , 0 , 0) __SCREAMING_SNAKE_CASE = Bounds(0 , self.regularization) __SCREAMING_SNAKE_CASE = minimize( lowerCAmelCase__ , np.ones(lowerCAmelCase__) , bounds=lowerCAmelCase__ , constraints=[ly_contraint]).x __SCREAMING_SNAKE_CASE = l_star # calculating mean offset of separation plane to points __SCREAMING_SNAKE_CASE = 0 for i in range(lowerCAmelCase__): for j in range(lowerCAmelCase__): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) __SCREAMING_SNAKE_CASE = s / n def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , lowerCAmelCase__) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
155
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class __lowerCamelCase(BaseImageProcessor):
    """simple docstring"""

    # Pipeline output key(s) produced by this processor.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        # Reconstructed: original repeated one parameter name per signature
        # (SyntaxError) and stored nothing on ``self``.
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Shortest-edge resize; the target dict must carry "shortest_edge".
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        target = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=target, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Crop to the configured height/width around the image center.
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Scale pixel values (typically 1/255 to reach [0, 1]).
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Channel-wise standardization with the given statistics.
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply resize/crop/rescale/normalize (each optional) to a batch."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
704
def _A ( lowerCAmelCase_ : int = 100_0000 ): """simple docstring""" lowerCAmelCase__ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , lowerCAmelCase_ ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
125
0
"""Dummy placeholder classes raising a helpful error when the `torch`,
`transformers` and `onnx` backends are missing.

Reconstructed: the original gave all six classes the same name (each
rebinding clobbered the previous) and used an undefined ``metaclass=A``
instead of ``DummyObject``.  NOTE(review): class names restored from the
upstream diffusers dummy module — confirm against the package __init__.
"""
from ..utils import DummyObject, requires_backends


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
199
"""Evaluate a postfix (RPN) expression, printing each stack step."""
import operator as op


def solve(post_fix):
    """Evaluate *post_fix* (a list of tokens) and return the integer result.

    Prints a step-by-step table of push/pop actions.  Division truncates
    toward zero via ``int(x / y)``.

    Fixes vs. original: the entry point and the input variable in the main
    guard were bound to throwaway names while the code read ``solve`` /
    ``Postfix`` (NameError).
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731  integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is an operand
            stack.append(x)
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # right operand popped first
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(str(opr[x](int(a), int(b))))
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


# Backward-compatible alias for the original (obfuscated) name.
snake_case_ = solve


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
199
1
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Tests for BarkProcessor (tokenizer + speaker-embedding handling).

    Reconstructed: the original gave every method the same name (so tests
    overwrote each other), bound setUp fixtures to locals instead of
    ``self.*``, and expanded an undefined ``**__lowerCAmelCase``.
    """

    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname, "file.npz")
        np.savez(file_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=file_path)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        # NOTE(review): tokenizer-call flags restored from the upstream Bark
        # processor test — confirm they match BarkProcessor's internals.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
710
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


# NOTE(review): the original defined both test classes under one shared name,
# so the integration class clobbered the fast one.  Renamed to the upstream
# diffusers test-class names.
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for DiTPipeline with tiny dummy components."""

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original held a bare ``False`` class attribute whose
    # name was obfuscated away; ``test_cpu_offload`` matches the mixin flags
    # of this diffusers era — confirm against test_pipelines_common.
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1_000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps has no per-device Generator; fall back to the global RNG.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the released facebook/DiT checkpoints."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"""
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        # Attach the faster multistep scheduler (the original built it but
        # never assigned it to the pipe).
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"""/dit/{word}_512.npy"""
            )
            assert np.abs((expected_image - image).max()) < 1e-1
437
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1_024,
    "moussaKam/barthez": 1_024,
    "moussaKam/barthez-orangesum-title": 1_024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BARThez tokenizer.

    Wraps a SentencePiece BPE model and adds the RoBERTa-style special-token
    layout: ``<s> A </s>`` for a single sequence and ``<s> A </s></s> B </s>``
    for a pair.

    NOTE(review): in the obfuscated original, the three public methods below
    all shared the name ``__a`` (two were dead code); they are restored to
    their conventional tokenizer names here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving the slow vocab is only possible when the sentencepiece model file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add ``<s>``/``</s>`` special tokens around one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids (BARThez does not use segment ids)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``.

        Raises:
            ValueError: if this fast tokenizer was built without the slow
                vocab file and therefore cannot produce one.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
45
"""Dataset of tokenized sequences used for language-model distillation."""
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wrap token-id sequences for distillation training.

    Filters out sequences that are too long (splitting them), too short, or
    dominated by unknown tokens.

    NOTE(review): in the obfuscated original, all helper methods shared one
    name (``lowercase_``) while ``__init__`` called ``self.check()`` etc. —
    i.e. the code could not run. The conventional names are restored here.
    """

    def __init__(self, params, data):
        """
        Args:
            params: namespace with ``max_model_input_size``, ``mlm``,
                ``special_tok_ids`` and ``is_master`` (assumed from usage —
                TODO confirm against caller).
            data: iterable of token-id sequences.
        """
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check that stored lengths match the stored sequences."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than the model limit into valid chunks."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(seq, n):
            return [seq[i : i + n] for i in range(0, len(seq), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # Reserve 2 slots per chunk for the re-inserted boundary tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(s) for s in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences where 50% or more of the tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log dataset statistics (main process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")

    def batch_sequences(self, batch):
        """Collate ``(token_ids, length)`` pairs into padded tensors.

        Returns:
            tk_t: LongTensor of shape (bs, max_seq_len_), right-padded.
            lg_t: LongTensor of shape (bs,) with the true lengths.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Pad to the longest sequence in the batch.
        max_seq_len_ = max(lengths)

        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
353
0
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def A__ ( ): """simple docstring""" _lowerCAmelCase = {} _lowerCAmelCase = 2 while True: _lowerCAmelCase = factor_map.pop(__lowerCamelCase, __lowerCamelCase ) if factor: _lowerCAmelCase = factor + prime while x in factor_map: x += factor _lowerCAmelCase = factor else: _lowerCAmelCase = prime yield prime prime += 1 def A__ ( __lowerCamelCase = 1e10 ): """simple docstring""" _lowerCAmelCase = sieve() _lowerCAmelCase = 1 while True: _lowerCAmelCase = next(__lowerCamelCase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(__lowerCamelCase ) n += 2 if __name__ == "__main__": print(solution())
309
"""Check that the model documentation table of contents is clean and sorted."""
import argparse
from collections import defaultdict

# Path to the English documentation table of contents.
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Deduplicate and sort one modality's model-doc ToC entries.

    Entries sharing the same ``local`` key are merged into one; the result is
    sorted case-insensitively by title.

    Raises:
        ValueError: if a duplicated ``local`` key maps to different titles.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort. The key lambda must reference its own argument; the original code
    # referenced an undefined name `s` here and raised NameError on every call.
    return sorted(new_doc, key=lambda entry: entry["title"].lower())


def check_model_doc(overwrite=False):
    """Verify (and with ``overwrite=True`` fix) the model-doc ToC sorting.

    Raises:
        ValueError: if the ToC is unsorted and ``overwrite`` is False.
    """
    # Imported lazily so the pure helper above stays importable without PyYAML.
    import yaml

    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Only modality sub-sections (those that themselves have "sections") are checked.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
309
1
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Run the same prompt through Stable Diffusion v1.1–v1.4 for comparison.

    NOTE(review): the obfuscated original had two hard bugs fixed here —
    ``super()._init_()`` (typo, parent never initialized) and
    ``register_modules`` called with the same keyword repeated four times
    (a SyntaxError); all four sub-pipelines were also collapsed onto one
    attribute name.
    """

    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
        requires_safety_checker=True,
    ):
        super().__init__()

        # v1.1–v1.3 are loaded from the hub; v1.4 is built from the components
        # this pipeline was constructed with.
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self):
        """Public (non-underscore) config entries as a dict."""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size="auto"):
        """Enable sliced attention; 'auto' halves the attention head size."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Generate with Stable Diffusion v1.1."""
        return self.pipe1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Generate with Stable Diffusion v1.2."""
        return self.pipe2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Generate with Stable Diffusion v1.3."""
        return self.pipe3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Generate with Stable Diffusion v1.4."""
        return self.pipe4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Run all four checkpoints on the same prompt.

        Returns:
            StableDiffusionPipelineOutput containing the first image from each
            of v1.1, v1.2, v1.3 and v1.4 (in that order).

        Raises:
            ValueError: if ``height`` or ``width`` is not divisible by 8.
        """
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get all result images into a single list and pass it via
        # StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
105
"""Greedy fractional-free knapsack over a menu of named items."""


class Things:
    """An item with a name, a value and a weight."""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        """Value density (value per unit of weight)."""
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of ``Things`` from parallel name/value/weight lists."""
    menu = []
    for i in range(len(name)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedily pick items ordered by ``key_func`` without exceeding ``max_cost``.

    Args:
        item: list of ``Things``.
        max_cost: total weight budget.
        key_func: ranking function (e.g. ``Things.get_value``); items are
            considered in descending order of this key.

    Returns:
        (chosen_items, total_value) — the selected items and the sum of their
        values. Note this is a greedy heuristic, not an optimal knapsack.
    """
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
554
0
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build the GLUE MRPC train/eval dataloaders.

    Args:
        accelerator: the ``Accelerator`` (used for process ordering and to
            pick padding strategy per distributed/mixed-precision mode).
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train BERT on MRPC with gradient accumulation via ``accelerator.accumulate``."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI args and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    """Configuration for the TimeSformer video transformer model.

    NOTE(review): the obfuscated original assigned every value to a throwaway
    local (``_snake_case = image_size``) instead of an instance attribute, so
    the config stored nothing; the ``self.<attr>`` assignments are restored.
    """

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
278
0
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def UpperCamelCase_( _A :Tuple , _A :str )-> List[str]: UpperCamelCase__ = checkpoint UpperCamelCase__ = {} UpperCamelCase__ = vae_state_dict["encoder.conv_in.weight"] UpperCamelCase__ = vae_state_dict["encoder.conv_in.bias"] UpperCamelCase__ = vae_state_dict["encoder.conv_out.weight"] UpperCamelCase__ = vae_state_dict["encoder.conv_out.bias"] UpperCamelCase__ = vae_state_dict["encoder.norm_out.weight"] UpperCamelCase__ = vae_state_dict["encoder.norm_out.bias"] UpperCamelCase__ = vae_state_dict["decoder.conv_in.weight"] UpperCamelCase__ = vae_state_dict["decoder.conv_in.bias"] UpperCamelCase__ = vae_state_dict["decoder.conv_out.weight"] UpperCamelCase__ = vae_state_dict["decoder.conv_out.bias"] UpperCamelCase__ = vae_state_dict["decoder.norm_out.weight"] UpperCamelCase__ = vae_state_dict["decoder.norm_out.bias"] UpperCamelCase__ = vae_state_dict["quant_conv.weight"] UpperCamelCase__ = vae_state_dict["quant_conv.bias"] UpperCamelCase__ = vae_state_dict["post_quant_conv.weight"] UpperCamelCase__ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only UpperCamelCase__ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) UpperCamelCase__ = { layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(_A ) } # Retrieves the keys for the decoder up blocks only UpperCamelCase__ = len({".".join(layer.split("." 
)[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) UpperCamelCase__ = { layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(_A ) } for i in range(_A ): UpperCamelCase__ = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key] if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: UpperCamelCase__ = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.weight''' ) UpperCamelCase__ = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.bias''' ) UpperCamelCase__ = renew_vae_resnet_paths(_A ) UpperCamelCase__ = {"old": F'''down.{i}.block''', "new": F'''down_blocks.{i}.resnets'''} assign_to_checkpoint(_A , _A , _A , additional_replacements=[meta_path] , config=_A ) UpperCamelCase__ = [key for key in vae_state_dict if "encoder.mid.block" in key] UpperCamelCase__ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCamelCase__ = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key] UpperCamelCase__ = renew_vae_resnet_paths(_A ) UpperCamelCase__ = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_A , _A , _A , additional_replacements=[meta_path] , config=_A ) UpperCamelCase__ = [key for key in vae_state_dict if "encoder.mid.attn" in key] UpperCamelCase__ = renew_vae_attention_paths(_A ) UpperCamelCase__ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(_A , _A , _A , additional_replacements=[meta_path] , config=_A ) conv_attn_to_linear(_A ) for i in range(_A ): UpperCamelCase__ = num_up_blocks - 1 - i UpperCamelCase__ = [ key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key ] if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: UpperCamelCase__ = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.weight''' ] UpperCamelCase__ = vae_state_dict[ 
F'''decoder.up.{block_id}.upsample.conv.bias''' ] UpperCamelCase__ = renew_vae_resnet_paths(_A ) UpperCamelCase__ = {"old": F'''up.{block_id}.block''', "new": F'''up_blocks.{i}.resnets'''} assign_to_checkpoint(_A , _A , _A , additional_replacements=[meta_path] , config=_A ) UpperCamelCase__ = [key for key in vae_state_dict if "decoder.mid.block" in key] UpperCamelCase__ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCamelCase__ = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key] UpperCamelCase__ = renew_vae_resnet_paths(_A ) UpperCamelCase__ = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_A , _A , _A , additional_replacements=[meta_path] , config=_A ) UpperCamelCase__ = [key for key in vae_state_dict if "decoder.mid.attn" in key] UpperCamelCase__ = renew_vae_attention_paths(_A ) UpperCamelCase__ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(_A , _A , _A , additional_replacements=[meta_path] , config=_A ) conv_attn_to_linear(_A ) return new_checkpoint def UpperCamelCase_( _A :str , _A :str , )-> Union[str, Any]: # Only support V1 UpperCamelCase__ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) UpperCamelCase__ = io.BytesIO(r.content ) UpperCamelCase__ = OmegaConf.load(_A ) UpperCamelCase__ = 5_12 UpperCamelCase__ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open UpperCamelCase__ = {} with safe_open(_A , framework="pt" , device="cpu" ) as f: for key in f.keys(): UpperCamelCase__ = f.get_tensor(_A ) else: UpperCamelCase__ = torch.load(_A , map_location=_A )["state_dict"] # Convert the VAE model. 
UpperCamelCase__ = create_vae_diffusers_config(_A , image_size=_A ) UpperCamelCase__ = custom_convert_ldm_vae_checkpoint(_A , _A ) UpperCamelCase__ = AutoencoderKL(**_A ) vae.load_state_dict(_A ) vae.save_pretrained(_A ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') __UpperCamelCase = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
551
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { 'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json', 'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json', 'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json', 'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json', 'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json', 'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json', 'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json', 'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json', 'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json', } class lowerCamelCase__ ( UpperCAmelCase ): """simple docstring""" _UpperCamelCase : List[str] = 'xmod' def __init__( self , snake_case=30522 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-1_2 , snake_case=1 , snake_case=0 , snake_case=2 , snake_case="absolute" , snake_case=True , snake_case=None , snake_case=False , snake_case=2 , snake_case=False , snake_case=True , snake_case=True , snake_case=("en_XX",) , snake_case=None , **snake_case , ): '''simple docstring''' super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size 
UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = hidden_act UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = initializer_range UpperCamelCase__ = layer_norm_eps UpperCamelCase__ = position_embedding_type UpperCamelCase__ = use_cache UpperCamelCase__ = classifier_dropout UpperCamelCase__ = pre_norm UpperCamelCase__ = adapter_reduction_factor UpperCamelCase__ = adapter_layer_norm UpperCamelCase__ = adapter_reuse_layer_norm UpperCamelCase__ = ln_before_adapter UpperCamelCase__ = list(snake_case ) UpperCamelCase__ = default_language class lowerCamelCase__ ( UpperCAmelCase ): """simple docstring""" @property def snake_case__ ( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase__ = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase__ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
551
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available lowerCamelCase__ : Optional[int] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Dict = ["""GPTSw3Tokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys lowerCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
495
from ..utils import DummyObject, requires_backends class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Any = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Any = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Union[str, Any] = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : int = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def 
lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Optional[Any] = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Tuple = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : List[str] = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Union[str, Any] = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , 
**SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Optional[Any] = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Dict = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Optional[Any] = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : Optional[Any] = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' 
requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) class _snake_case ( metaclass=UpperCAmelCase_ ): __lowerCAmelCase : List[Any] = ['flax'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""]) @classmethod def lowercase__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""flax"""])
495
1
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( _a ): _A : Optional[int] = ['''image_processor''', '''tokenizer'''] _A : Union[str, Any] = '''ViTImageProcessor''' _A : List[str] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,SCREAMING_SNAKE_CASE__ : int=None ,**SCREAMING_SNAKE_CASE__ : Tuple ): SCREAMING_SNAKE_CASE:str = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,SCREAMING_SNAKE_CASE__ ,) SCREAMING_SNAKE_CASE:int = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE:List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) def __call__( self : Any ,SCREAMING_SNAKE_CASE__ : List[str]=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : str=None ,**SCREAMING_SNAKE_CASE__ : str ): if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." 
) if text is not None: SCREAMING_SNAKE_CASE:int = self.tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) if visual_prompt is not None: SCREAMING_SNAKE_CASE:Tuple = self.image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) if images is not None: SCREAMING_SNAKE_CASE:List[Any] = self.image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) if visual_prompt is not None and images is not None: SCREAMING_SNAKE_CASE:Union[str, Any] = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: SCREAMING_SNAKE_CASE:int = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: SCREAMING_SNAKE_CASE:str = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) ,tensor_type=SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : Dict ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : int ): return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : List[Any] ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Any ): return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) @property def __UpperCamelCase ( self : Optional[Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." ,SCREAMING_SNAKE_CASE__ ,) return self.image_processor_class @property def __UpperCamelCase ( self : int ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." ,SCREAMING_SNAKE_CASE__ ,) return self.image_processor
143
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class _snake_case ( _a ): _A : List[str] = '''camembert''' def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=30_522 ,SCREAMING_SNAKE_CASE__ : int=768 ,SCREAMING_SNAKE_CASE__ : List[Any]=12 ,SCREAMING_SNAKE_CASE__ : Any=12 ,SCREAMING_SNAKE_CASE__ : Tuple=3_072 ,SCREAMING_SNAKE_CASE__ : str="gelu" ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Dict=512 ,SCREAMING_SNAKE_CASE__ : List[str]=2 ,SCREAMING_SNAKE_CASE__ : Tuple=0.02 ,SCREAMING_SNAKE_CASE__ : Any=1e-12 ,SCREAMING_SNAKE_CASE__ : str=1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 ,SCREAMING_SNAKE_CASE__ : Any="absolute" ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,**SCREAMING_SNAKE_CASE__ : Tuple ,): super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE:str = hidden_size SCREAMING_SNAKE_CASE:str = num_hidden_layers SCREAMING_SNAKE_CASE:List[str] = num_attention_heads SCREAMING_SNAKE_CASE:Optional[int] = hidden_act SCREAMING_SNAKE_CASE:int = intermediate_size SCREAMING_SNAKE_CASE:List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE:Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE:str = max_position_embeddings 
SCREAMING_SNAKE_CASE:Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE:Optional[int] = initializer_range SCREAMING_SNAKE_CASE:Tuple = layer_norm_eps SCREAMING_SNAKE_CASE:Optional[Any] = position_embedding_type SCREAMING_SNAKE_CASE:Optional[int] = use_cache SCREAMING_SNAKE_CASE:List[Any] = classifier_dropout class _snake_case ( _a ): @property def __UpperCamelCase ( self : List[str] ): if self.task == "multiple-choice": SCREAMING_SNAKE_CASE:Any = {0: "batch", 1: "choice", 2: "sequence"} else: SCREAMING_SNAKE_CASE:str = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
143
1
"""simple docstring""" import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Optional[int] = { 'artists_file': 'artists.json', 'lyrics_file': 'lyrics.json', 'genres_file': 'genres.json', } __SCREAMING_SNAKE_CASE : str = { 'artists_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json', }, 'genres_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json', }, 'lyrics_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json', }, } __SCREAMING_SNAKE_CASE : List[Any] = { 'jukebox': 512, } class __A (snake_case__): '''simple docstring''' __lowercase: int = VOCAB_FILES_NAMES __lowercase: Tuple = PRETRAINED_VOCAB_FILES_MAP __lowercase: int = PRETRAINED_LYRIC_TOKENS_SIZES __lowercase: str = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=["v3", "v2", "v2"] , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : Dict=5 , UpperCAmelCase_ : List[str]="<|endoftext|>" , **UpperCAmelCase_ : List[str] , ) ->Any: """simple docstring""" snake_case_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token super().__init__( unk_token=UpperCAmelCase_ , n_genres=UpperCAmelCase_ , version=UpperCAmelCase_ , max_n_lyric_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = version snake_case_ = 
max_n_lyric_tokens snake_case_ = n_genres with open(UpperCAmelCase_ , encoding="""utf-8""" ) as vocab_handle: snake_case_ = json.load(UpperCAmelCase_ ) with open(UpperCAmelCase_ , encoding="""utf-8""" ) as vocab_handle: snake_case_ = json.load(UpperCAmelCase_ ) with open(UpperCAmelCase_ , encoding="""utf-8""" ) as vocab_handle: snake_case_ = json.load(UpperCAmelCase_ ) snake_case_ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. if len(self.lyrics_encoder ) == 79: snake_case_ = oov.replace(R"""\-'""" , R"""\-+'""" ) snake_case_ = regex.compile(UpperCAmelCase_ ) snake_case_ = {v: k for k, v in self.artists_encoder.items()} snake_case_ = {v: k for k, v in self.genres_encoder.items()} snake_case_ = {v: k for k, v in self.lyrics_encoder.items()} @property def lowerCAmelCase ( self : Union[str, Any] ) ->int: """simple docstring""" return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]: """simple docstring""" return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] ) ->int: """simple docstring""" snake_case_ = [self.artists_encoder.get(UpperCAmelCase_ , 0 ) for artist in list_artists] for genres in range(len(UpperCAmelCase_ ) ): snake_case_ = [self.genres_encoder.get(UpperCAmelCase_ , 0 ) for genre in list_genres[genres]] snake_case_ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) snake_case_ = [[self.lyrics_encoder.get(UpperCAmelCase_ , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[str] ) ->Any: """simple docstring""" return list(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , 
UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" snake_case_ , snake_case_ , snake_case_ = self.prepare_for_tokenization(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = self._tokenize(UpperCAmelCase_ ) return artist, genre, lyrics def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ) ->Tuple[str, str, str, Dict[str, Any]]: """simple docstring""" for idx in range(len(self.version ) ): if self.version[idx] == "v3": snake_case_ = artists[idx].lower() snake_case_ = [genres[idx].lower()] else: snake_case_ = self._normalize(artists[idx] ) + """.v2""" snake_case_ = [ self._normalize(UpperCAmelCase_ ) + """.v2""" for genre in genres[idx].split("""_""" ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": snake_case_ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" ) snake_case_ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n""" snake_case_ = {vocab[index]: index + 1 for index in range(len(UpperCAmelCase_ ) )} snake_case_ = 0 snake_case_ = len(UpperCAmelCase_ ) + 1 snake_case_ = self.vocab snake_case_ = {v: k for k, v in self.vocab.items()} snake_case_ = """""" else: snake_case_ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" ) snake_case_ = self._run_strip_accents(UpperCAmelCase_ ) snake_case_ = lyrics.replace("""\\""" , """\n""" ) snake_case_ = self.out_of_vocab.sub("""""" , UpperCAmelCase_ ), [], [] return artists, genres, lyrics def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[str] ) ->Tuple: """simple docstring""" snake_case_ = unicodedata.normalize("""NFD""" , UpperCAmelCase_ ) snake_case_ = [] for char in text: snake_case_ = unicodedata.category(UpperCAmelCase_ ) if cat == "Mn": continue output.append(UpperCAmelCase_ ) return "".join(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] , 
UpperCAmelCase_ : str ) ->str: """simple docstring""" snake_case_ = ( [chr(UpperCAmelCase_ ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )] + [chr(UpperCAmelCase_ ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )] + [chr(UpperCAmelCase_ ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )] + ["""."""] ) snake_case_ = frozenset(UpperCAmelCase_ ) snake_case_ = re.compile(R"""_+""" ) snake_case_ = """""".join([c if c in accepted else """_""" for c in text.lower()] ) snake_case_ = pattern.sub("""_""" , UpperCAmelCase_ ).strip("""_""" ) return text def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->str: """simple docstring""" return " ".join(UpperCAmelCase_ ) def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : bool = False ) ->Union[str, Any]: """simple docstring""" if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = TensorType(UpperCAmelCase_ ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" ) import tensorflow as tf snake_case_ = tf.constant snake_case_ = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" ) import torch snake_case_ = torch.tensor snake_case_ = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" ) import jax.numpy as jnp # noqa: F811 snake_case_ = jnp.array snake_case_ = _is_jax else: snake_case_ = np.asarray snake_case_ = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: snake_case_ = [inputs] if not is_tensor(UpperCAmelCase_ ): snake_case_ = as_tensor(UpperCAmelCase_ ) 
except: # noqa E722 raise ValueError( """Unable to create tensor, you should probably activate truncation and/or padding """ """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" ) return inputs def __call__( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any]="" , UpperCAmelCase_ : List[Any]="pt" ) ->BatchEncoding: """simple docstring""" snake_case_ = [0, 0, 0] snake_case_ = [artist] * len(self.version ) snake_case_ = [genres] * len(self.version ) snake_case_ , snake_case_ , snake_case_ = self.tokenize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ , snake_case_ = self._convert_token_to_id(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = [-INFINITY] * len(full_tokens[-1] ) snake_case_ = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCAmelCase_ ) for i in range(len(self.version ) ) ] return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] ) with open(UpperCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCAmelCase_ ) ) snake_case_ = os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] ) with open(UpperCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCAmelCase_ ) ) snake_case_ = os.path.join( UpperCAmelCase_ , (filename_prefix + 
"""-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] ) with open(UpperCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCAmelCase_ ) ) return (artists_file, genres_file, lyrics_file) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.artists_decoder.get(UpperCAmelCase_ ) snake_case_ = [self.genres_decoder.get(UpperCAmelCase_ ) for genre in genres_index] snake_case_ = [self.lyrics_decoder.get(UpperCAmelCase_ ) for character in lyric_index] return artist, genres, lyrics
2
"""simple docstring""" from functools import reduce __SCREAMING_SNAKE_CASE : Tuple = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def _a ( _SCREAMING_SNAKE_CASE = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str(int(_SCREAMING_SNAKE_CASE ) * int(_SCREAMING_SNAKE_CASE ) ) , n[i : i + 13] ) ) for i in range(len(_SCREAMING_SNAKE_CASE ) - 12 ) ) if __name__ == "__main__": print(f"""{solution() = }""")
2
1
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


# nltk >= 3.6.4 expects pre-tokenized input, so we need its word tokenizer.
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
    from nltk import word_tokenize


_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'

_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'

_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase_(datasets.Metric):
    """METEOR metric, thin wrapper around ``nltk.translate.meteor_score``.

    Method names restored to the ``datasets.Metric`` hook names (``_info``,
    ``_download_and_prepare``, ``_compute``) — the anonymized source gave all
    three the same name, so only the last survived.
    """

    def _info(self):
        """Describe the metric's inputs, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Fetch the NLTK corpora METEOR needs (wordnet, and punkt/omw for newer nltk)."""
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        """Return the mean sentence-level METEOR score over all (reference, prediction) pairs."""
        if NLTK_VERSION >= version.Version("3.6.5"):
            # newer nltk requires pre-tokenized input
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
418
'''simple docstring'''
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


# Validation pairs (src/tgt sentences per language pair) shared by all tests below.
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)


@require_torch
class lowercase_(unittest.TestCase):
    """BLEU regression test for the facebook/wmt19-* FSMT checkpoints.

    Method names restored (the anonymized source gave all three methods the same
    name, shadowing each other, and duplicated parameter names — a SyntaxError).
    """

    def get_tokenizer(self, mname):
        """Load the tokenizer for checkpoint ``mname``."""
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        """Load the model for ``mname`` on ``torch_device`` (fp16 on CUDA)."""
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        # NOTE(review): keyword values below were mangled by anonymization; restored
        # from upstream (truncation=True, skip_special_tokens=True,
        # clean_up_tokenization_spaces=False) — confirm against the original repo.
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
418
1
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS snake_case = logging.get_logger(__name__) snake_case = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, '''constant''': get_constant_schedule, '''constant_w_warmup''': get_constant_schedule_with_warmup, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : Dict , __lowerCamelCase : int=None , __lowerCamelCase : Tuple=None , *__lowerCamelCase : Dict , **__lowerCamelCase : Optional[int] ): """simple docstring""" super().__init__(*__lowerCamelCase , **__lowerCamelCase ) if config is None: assert isinstance(self.model , __lowerCamelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f""" {self.model.__class__}""" ) _snake_case = self.model.config else: _snake_case = config _snake_case = data_args _snake_case = self.config.tgt_vocab_size if isinstance(self.config , __lowerCamelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is 
not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for""" ''' padding..''' ) if self.args.label_smoothing == 0: _snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _snake_case = label_smoothed_nll_loss def __UpperCAmelCase ( self : str , __lowerCamelCase : int ): """simple docstring""" if self.optimizer is None: _snake_case = ['''bias''', '''LayerNorm.weight'''] _snake_case = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] _snake_case = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _snake_case = Adafactor _snake_case = {'''scale_parameter''': False, '''relative_step''': False} else: _snake_case = AdamW _snake_case = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } _snake_case = self.args.learning_rate if self.sharded_ddp: _snake_case = OSS( params=__lowerCamelCase , optim=__lowerCamelCase , **__lowerCamelCase , ) else: _snake_case = optimizer_cls(__lowerCamelCase , **__lowerCamelCase ) if self.lr_scheduler is None: _snake_case = self._get_lr_scheduler(__lowerCamelCase ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def __UpperCAmelCase ( self : int , __lowerCamelCase : int ): """simple 
docstring""" _snake_case = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _snake_case = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: _snake_case = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCamelCase ) return scheduler def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ): """simple docstring""" if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _snake_case = model(**__lowerCamelCase , use_cache=__lowerCamelCase )[0] _snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models _snake_case , _snake_case = model(**__lowerCamelCase , labels=__lowerCamelCase , use_cache=__lowerCamelCase )[:2] else: # compute label smoothed loss _snake_case = model(**__lowerCamelCase , use_cache=__lowerCamelCase )[0] _snake_case = torch.nn.functional.log_softmax(__lowerCamelCase , dim=-1 ) _snake_case , _snake_case = self.loss_fn(__lowerCamelCase , __lowerCamelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def __UpperCAmelCase ( self : List[str] , 
__lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ): """simple docstring""" _snake_case = inputs.pop('''labels''' ) _snake_case , _snake_case = self._compute_loss(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return loss def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : nn.Module , __lowerCamelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCamelCase : bool , __lowerCamelCase : Optional[List[str]] = None , ): """simple docstring""" _snake_case = self._prepare_inputs(__lowerCamelCase ) _snake_case = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _snake_case = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **__lowerCamelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _snake_case = self._pad_tensors_to_max_len(__lowerCamelCase , gen_kwargs['''max_length'''] ) _snake_case = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data _snake_case , _snake_case = self._compute_loss(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _snake_case = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _snake_case = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _snake_case = self._pad_tensors_to_max_len(__lowerCamelCase , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def __UpperCAmelCase ( self : int , __lowerCamelCase : Any , __lowerCamelCase : int ): """simple docstring""" # If PAD token is not defined at least EOS token has to be defined _snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else 
self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' f""" padded to `max_length`={max_length}""" ) _snake_case = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) _snake_case = tensor return padded_tensor
404
"""simple docstring""" import math import sys def snake_case ( lowerCAmelCase_ ) -> int: if number != int(lowerCAmelCase_ ): raise ValueError('''the value of input must be a natural number''' ) if number < 0: raise ValueError('''the value of input must not be a negative number''' ) if number == 0: return 1 _snake_case = [-1] * (number + 1) _snake_case = 0 for i in range(1 , number + 1 ): _snake_case = sys.maxsize _snake_case = int(math.sqrt(lowerCAmelCase_ ) ) for j in range(1 , root + 1 ): _snake_case = 1 + answers[i - (j**2)] _snake_case = min(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
404
1