Dataset schema:

| column                    | dtype  | values               |
|---------------------------|--------|----------------------|
| `code`                    | string | lengths 87 to 55.2k  |
| `code_codestyle`          | int64  | 0 to 349             |
| `style_context`           | string | lengths 135 to 49.1k |
| `style_context_codestyle` | int64  | 0 to 349             |
| `label`                   | int64  | 0 to 1               |
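Each row pairs one full Python source file (`code`) with a second file (`style_context`), plus integer style ids and a binary `label`. A minimal sketch of inspecting such rows with the `datasets` library; the repository id below is a placeholder, since this dump does not name the dataset:

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the dataset's actual path on the Hugging Face Hub.
ds = load_dataset("username/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # each `code`/`style_context` field holds one full Python file
```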
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def _A ( ): """simple docstring""" a =ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=lowercase ) a =parser.add_subparsers(help='''accelerate command helpers''' ) # Register commands get_config_parser(subparsers=lowercase ) env_command_parser(subparsers=lowercase ) launch_command_parser(subparsers=lowercase ) tpu_command_parser(subparsers=lowercase ) test_command_parser(subparsers=lowercase ) # Let's go a =parser.parse_args() if not hasattr(lowercase , '''func''' ): parser.print_help() exit(1 ) # Run args.func(lowercase ) if __name__ == "__main__": main()
81
style_context:

```python
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
```

style_context_codestyle: 295
label: 0
Row 2

code:

```python
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
```

code_codestyle: 82
style_context:

```python
import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path) -> bool:
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
```

style_context_codestyle: 295
label: 0
Row 3

code:

```python
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
```

code_codestyle: 83
style_context:

```python
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
```

style_context_codestyle: 295
label: 0
"""simple docstring""" def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Dict ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Any = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = 0 while b > 0: if b & 1: lowerCAmelCase_ :str = ((res % c) + (a % c)) % c a += a b >>= 1 return res
84
style_context:

```python
def split(string: str, separator: str = " ") -> list:
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])

    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
```

style_context_codestyle: 295
label: 0
Row 5

code:

```python
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
```

code_codestyle: 85
style_context:

```python
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
```

style_context_codestyle: 295
label: 0
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): if inductance <= 0: raise ValueError('Inductance cannot be 0 or negative' ) elif capacitance <= 0: raise ValueError('Capacitance cannot be 0 or negative' ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
86
style_context:

```python
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

        [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
              to the maximum acceptable input length for the model if that argument is not provided. This will
              truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences
              (or a batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only truncate
              the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
```

style_context_codestyle: 295
label: 0
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # grow the query size until enough candidates come back (capped at 10k)
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
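# Example invocation of the retrieval script above -- a usage sketch, not part
# of the original file; the file name `retrieve.py` is hypothetical, the flags
# come from parse_args():
#
#     python retrieve.py --class_prompt "a photo of a dog" \
#         --class_data_dir ./class_data/dog --num_class_images 200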
87
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
295
0
def selection_sort(collection):
    """In-place selection sort: repeatedly select the minimum of the unsorted
    suffix and swap it into position. Returns the list for convenience."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
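# Quick sanity check for selection_sort above -- a minimal sketch, not part of
# the original script; it relies only on the function defined there:
#
#     >>> selection_sort([5, 2, 4, 1, 3])
#     [1, 2, 3, 4, 5]
#     >>> selection_sort([-1, 0, -1])
#     [-1, -1, 0]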
88
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class LoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()

        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
295
0
"""Convert ViT and non-distilled DeiT checkpoints from the timm library."""

import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm checkpoint's weights to our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
89
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
295
0
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        # insert values at the head in descending order, yielding an ascending list
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
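# Usage sketch for merge_lists above (names taken from the reconstructed code;
# not part of the original file). Note that merging re-sorts the concatenated
# values on construction, i.e. it costs O((m + n) log(m + n)) rather than a
# linear two-pointer merge:
#
#     >>> print(merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([2])))
#     1 -> 2 -> 3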
90
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
295
0
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model UpperCAmelCase_ : str = """0.12""" # assumed parallelism: 8 if is_torch_available(): import torch def _A (__a , __a , __a=None ) -> str: """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE_ : Any = random.Random() SCREAMING_SNAKE_CASE_ : Optional[Any] = 1 for dim in shape: total_dims *= dim SCREAMING_SNAKE_CASE_ : Optional[Any] = [] for _ in range(__a ): values.append(rng.randint(0 , vocab_size - 1 ) ) SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(__a , dtype=jnp.intaa ).reshape(__a ) return output def _A (__a , __a=None ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor(__a , vocab_size=2 , rng=__a ) # make sure that at least one token is attended to for each batch SCREAMING_SNAKE_CASE_ : str = 1 return attn_mask @require_flax class lowerCAmelCase__ : '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = () def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 SCREAMING_SNAKE_CASE_ : List[Any] = 2 SCREAMING_SNAKE_CASE_ : Tuple = inputs['''input_ids'''].shape[-1] // 2 SCREAMING_SNAKE_CASE_ : Tuple = inputs['''input_ids'''][:max_batch_size, :sequence_length] SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.ones_like(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` SCREAMING_SNAKE_CASE_ : Any = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self._get_input_ids_and_config() SCREAMING_SNAKE_CASE_ : Tuple = False SCREAMING_SNAKE_CASE_ : int = max_length SCREAMING_SNAKE_CASE_ : str = 0 for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE_ : List[str] = getattr(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = pt_model_class(lowercase_).eval() SCREAMING_SNAKE_CASE_ : int = load_flax_weights_in_pytorch_model(lowercase_ , flax_model.params) SCREAMING_SNAKE_CASE_ : Tuple = flax_model.generate(lowercase_).sequences SCREAMING_SNAKE_CASE_ : Dict = pt_model.generate(torch.tensor(lowercase_ , dtype=torch.long)) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: SCREAMING_SNAKE_CASE_ : int = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist()) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_input_ids_and_config() SCREAMING_SNAKE_CASE_ : Any = False SCREAMING_SNAKE_CASE_ : Optional[Any] = max_length for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : List[str] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = model.generate(lowercase_).sequences self.assertEqual(generation_outputs.shape[-1] , lowercase_) SCREAMING_SNAKE_CASE_ : Any = jit(model.generate) SCREAMING_SNAKE_CASE_ : Optional[int] = jit_generate(lowercase_).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self._get_input_ids_and_config() SCREAMING_SNAKE_CASE_ : Any = True SCREAMING_SNAKE_CASE_ : List[Any] = max_length for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = model.generate(lowercase_).sequences self.assertEqual(generation_outputs.shape[-1] , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = jit(model.generate) SCREAMING_SNAKE_CASE_ : Any = jit_generate(lowercase_).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self._get_input_ids_and_config() SCREAMING_SNAKE_CASE_ : Dict = False SCREAMING_SNAKE_CASE_ : int = max_length SCREAMING_SNAKE_CASE_ : Tuple = 2 for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Any = model.generate(lowercase_).sequences self.assertEqual(generation_outputs.shape[-1] , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = jit(model.generate) SCREAMING_SNAKE_CASE_ : Tuple = jit_generate(lowercase_).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self._get_input_ids_and_config() SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : Optional[Any] = max_length SCREAMING_SNAKE_CASE_ : str = 2 SCREAMING_SNAKE_CASE_ : Optional[Any] = 2 for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : int = model.generate(lowercase_).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self._get_input_ids_and_config() SCREAMING_SNAKE_CASE_ : Union[str, Any] = True SCREAMING_SNAKE_CASE_ : str = max_length SCREAMING_SNAKE_CASE_ : Tuple = 0.8 SCREAMING_SNAKE_CASE_ : Tuple = 10 SCREAMING_SNAKE_CASE_ : Optional[int] = 0.3 SCREAMING_SNAKE_CASE_ : List[str] = 1 SCREAMING_SNAKE_CASE_ : Tuple = 8 SCREAMING_SNAKE_CASE_ : Optional[Any] = 9 for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : Tuple = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(lowercase_).sequences 
self.assertEqual(generation_outputs.shape[-1] , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = jit(model.generate) SCREAMING_SNAKE_CASE_ : List[Any] = jit_generate(lowercase_).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self._get_input_ids_and_config() SCREAMING_SNAKE_CASE_ : Any = max_length SCREAMING_SNAKE_CASE_ : List[Any] = 1 SCREAMING_SNAKE_CASE_ : Optional[int] = 8 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 9 for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : int = model.generate(lowercase_).sequences self.assertEqual(generation_outputs.shape[-1] , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = jit(model.generate) SCREAMING_SNAKE_CASE_ : Union[str, Any] = jit_generate(lowercase_).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_input_ids_and_config() SCREAMING_SNAKE_CASE_ : int = max_length SCREAMING_SNAKE_CASE_ : List[Any] = 2 SCREAMING_SNAKE_CASE_ : Tuple = 1 SCREAMING_SNAKE_CASE_ : Dict = 8 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 9 for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model.generate(lowercase_).sequences self.assertEqual(generation_outputs.shape[-1] , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = jit(model.generate) SCREAMING_SNAKE_CASE_ : Dict = jit_generate(lowercase_).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self._get_input_ids_and_config() # pad attention mask on the left SCREAMING_SNAKE_CASE_ : str = attention_mask.at[(0, 0)].set(0) SCREAMING_SNAKE_CASE_ : Dict = False SCREAMING_SNAKE_CASE_ : Tuple = max_length for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = model.generate(lowercase_ , attention_mask=lowercase_).sequences self.assertEqual(generation_outputs.shape[-1] , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = jit(model.generate) SCREAMING_SNAKE_CASE_ : List[Any] = jit_generate(lowercase_ , attention_mask=lowercase_).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self._get_input_ids_and_config() # pad attention mask on the left SCREAMING_SNAKE_CASE_ : List[Any] = attention_mask.at[(0, 0)].set(0) SCREAMING_SNAKE_CASE_ : Union[str, Any] = True SCREAMING_SNAKE_CASE_ : List[Any] = max_length for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = model.generate(lowercase_ , attention_mask=lowercase_).sequences self.assertEqual(generation_outputs.shape[-1] , lowercase_) 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jit(model.generate) SCREAMING_SNAKE_CASE_ : int = jit_generate(lowercase_ , attention_mask=lowercase_).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_input_ids_and_config() # pad attention mask on the left SCREAMING_SNAKE_CASE_ : Any = attention_mask.at[(0, 0)].set(0) SCREAMING_SNAKE_CASE_ : Dict = 2 SCREAMING_SNAKE_CASE_ : Any = max_length for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = model.generate(lowercase_ , attention_mask=lowercase_).sequences self.assertEqual(generation_outputs.shape[-1] , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = jit(model.generate) SCREAMING_SNAKE_CASE_ : Optional[Any] = jit_generate(lowercase_ , attention_mask=lowercase_).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) @require_flax class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''') SCREAMING_SNAKE_CASE_ : Any = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''') SCREAMING_SNAKE_CASE_ : List[str] = '''Hello world''' SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(lowercase_ , return_tensors='''np''').input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(lowercase_ , '''do_samples'''): model.generate(lowercase_ , do_samples=lowercase_) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(lowercase_ , '''foo'''): SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''foo''': '''bar'''} model.generate(lowercase_ , **lowercase_)
91
from math import isqrt


def is_prime(number: int) -> bool:
    '''simple docstring'''
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    '''simple docstring'''
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(F'{solution() = }')
295
0
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
    ),
    """microsoft/deberta-v2-xxlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
    ),
}


class a__ ( snake_case__ ):
    _a : int = """deberta-v2"""

    def __init__(self , _A=1_2_8_1_0_0 , _A=1_5_3_6 , _A=2_4 , _A=2_4 , _A=6_1_4_4 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=0 , _A=0.02 , _A=1E-7 , _A=False , _A=-1 , _A=0 , _A=True , _A=None , _A=0 , _A="gelu" , **_A , ):
        """simple docstring"""
        super().__init__(**_A )

        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = relative_attention
        __lowerCAmelCase = max_relative_positions
        __lowerCAmelCase = pad_token_id
        __lowerCAmelCase = position_biased_input

        # Backwards compatibility
        if type(_A ) == str:
            __lowerCAmelCase = [x.strip() for x in pos_att_type.lower().split("|" )]

        __lowerCAmelCase = pos_att_type
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = layer_norm_eps

        __lowerCAmelCase = kwargs.get("pooler_hidden_size" , _A )
        __lowerCAmelCase = pooler_dropout
        __lowerCAmelCase = pooler_hidden_act


class a__ ( snake_case__ ):
    @property
    def __SCREAMING_SNAKE_CASE( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            __lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            __lowerCAmelCase = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )

    @property
    def __SCREAMING_SNAKE_CASE( self ):
        """simple docstring"""
        return 1_2

    def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = -1 , _A = False , _A = None , _A = 3 , _A = 4_0 , _A = 4_0 , _A = None , ):
        """simple docstring"""
        __lowerCAmelCase = super().generate_dummy_inputs(preprocessor=_A , framework=_A )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
92
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
295
0
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ):
    lowerCAmelCase_ = KandinskyInpaintPipeline
    lowerCAmelCase_ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    lowerCAmelCase_ = [
        '''prompt''',
        '''negative_prompt''',
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
        '''mask_image''',
    ]
    lowerCAmelCase_ = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    lowerCAmelCase_ = False

    @property
    def _snake_case ( self ):
        """simple docstring"""
        return 32

    @property
    def _snake_case ( self ):
        """simple docstring"""
        return 32

    @property
    def _snake_case ( self ):
        """simple docstring"""
        return self.time_input_dim

    @property
    def _snake_case ( self ):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def _snake_case ( self ):
        """simple docstring"""
        return 1_00

    @property
    def _snake_case ( self ):
        """simple docstring"""
        lowercase_ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
        return tokenizer

    @property
    def _snake_case ( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase_ : Optional[Any] = MCLIPConfig(
            numDims=self.cross_attention_dim ,
            transformerDimensions=self.text_embedder_hidden_size ,
            hidden_size=self.text_embedder_hidden_size ,
            intermediate_size=37 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            vocab_size=10_05 ,
        )

        lowercase_ : int = MultilingualCLIP(__SCREAMING_SNAKE_CASE )
        lowercase_ : Union[str, Any] = text_encoder.eval()

        return text_encoder

    @property
    def _snake_case ( self ):
        """simple docstring"""
        torch.manual_seed(0 )

        lowercase_ : Any = {
            '''in_channels''': 9,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''text_image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''text_image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }

        lowercase_ : int = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
        return model

    @property
    def _snake_case ( self ):
        """simple docstring"""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def _snake_case ( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase_ : Tuple = VQModel(**self.dummy_movq_kwargs )
        return model

    def _snake_case ( self ):
        """simple docstring"""
        lowercase_ : int = self.dummy_text_encoder
        lowercase_ : int = self.dummy_tokenizer
        lowercase_ : Tuple = self.dummy_unet
        lowercase_ : List[Any] = self.dummy_movq

        lowercase_ : Dict = DDIMScheduler(
            num_train_timesteps=10_00 ,
            beta_schedule='''linear''' ,
            beta_start=0.00_085 ,
            beta_end=0.012 ,
            clip_sample=__SCREAMING_SNAKE_CASE ,
            set_alpha_to_one=__SCREAMING_SNAKE_CASE ,
            steps_offset=1 ,
            prediction_type='''epsilon''' ,
            thresholding=__SCREAMING_SNAKE_CASE ,
        )

        lowercase_ : List[Any] = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }

        return components

    def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
        """simple docstring"""
        lowercase_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
        lowercase_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__SCREAMING_SNAKE_CASE )
        # create init_image
        lowercase_ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
        lowercase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ : List[Any] = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((2_56, 2_56) )
        # create mask
        lowercase_ : Tuple = np.ones((64, 64) , dtype=np.floataa )
        lowercase_ : Optional[int] = 0

        if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
            lowercase_ : str = torch.manual_seed(__SCREAMING_SNAKE_CASE )
        else:
            lowercase_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
        lowercase_ : Optional[Any] = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
        return inputs

    def _snake_case ( self ):
        """simple docstring"""
        lowercase_ : List[Any] = '''cpu'''

        lowercase_ : Dict = self.get_dummy_components()

        lowercase_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
        lowercase_ : Optional[Any] = pipe.to(__SCREAMING_SNAKE_CASE )

        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )

        lowercase_ : Union[str, Any] = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
        lowercase_ : List[Any] = output.images

        lowercase_ : str = pipe(
            **self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ,
            return_dict=__SCREAMING_SNAKE_CASE ,
        )[0]

        lowercase_ : Optional[Any] = image[0, -3:, -3:, -1]
        lowercase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]

        print(F'''image.shape {image.shape}''' )

        assert image.shape == (1, 64, 64, 3)

        lowercase_ : Optional[int] = np.array(
            [0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''

    def _snake_case ( self ):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    def _snake_case ( self ):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self ):
        """simple docstring"""
        lowercase_ : Optional[int] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy'''
        )

        lowercase_ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/cat.png'''
        )
        lowercase_ : str = np.ones((7_68, 7_68) , dtype=np.floataa )
        lowercase_ : List[Any] = 0

        lowercase_ : int = '''a hat'''

        lowercase_ : int = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa
        )
        pipe_prior.to(__SCREAMING_SNAKE_CASE )

        lowercase_ : Optional[Any] = KandinskyInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa
        )
        lowercase_ : Dict = pipeline.to(__SCREAMING_SNAKE_CASE )
        pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )

        lowercase_ : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowercase_ , lowercase_ : Dict = pipe_prior(
            __SCREAMING_SNAKE_CASE ,
            generator=__SCREAMING_SNAKE_CASE ,
            num_inference_steps=5 ,
            negative_prompt='''''' ,
        ).to_tuple()

        lowercase_ : Dict = pipeline(
            __SCREAMING_SNAKE_CASE ,
            image=__SCREAMING_SNAKE_CASE ,
            mask_image=__SCREAMING_SNAKE_CASE ,
            image_embeds=__SCREAMING_SNAKE_CASE ,
            negative_image_embeds=__SCREAMING_SNAKE_CASE ,
            generator=__SCREAMING_SNAKE_CASE ,
            num_inference_steps=1_00 ,
            height=7_68 ,
            width=7_68 ,
            output_type='''np''' ,
        )

        lowercase_ : Tuple = output.images[0]

        assert image.shape == (7_68, 7_68, 3)

        assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
93
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    lowerCAmelCase = None

lowerCAmelCase = logging.get_logger(__name__)

lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

lowerCAmelCase = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    },
    '''tokenizer_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
    },
}

# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase = {
    '''t5-small''': 5_1_2,
    '''t5-base''': 5_1_2,
    '''t5-large''': 5_1_2,
    '''t5-3b''': 5_1_2,
    '''t5-11b''': 5_1_2,
}


class A ( A_ ):
    UpperCamelCase_ : Dict =VOCAB_FILES_NAMES
    UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : str =['''input_ids''', '''attention_mask''']
    UpperCamelCase_ : List[str] =TaTokenizer
    UpperCamelCase_ : List[int] =[]

    def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=1_0_0 , lowerCAmelCase=None , **lowerCAmelCase , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowercase= [f'<extra_id_{i}>' for i in range(lowerCAmelCase )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            __lowercase= len(set(filter(lambda lowerCAmelCase : bool('extra_id_' in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens'
                )

        super().__init__(
            lowerCAmelCase ,
            tokenizer_file=lowerCAmelCase ,
            eos_token=lowerCAmelCase ,
            unk_token=lowerCAmelCase ,
            pad_token=lowerCAmelCase ,
            extra_ids=lowerCAmelCase ,
            additional_special_tokens=lowerCAmelCase ,
            **lowerCAmelCase ,
        )

        __lowercase= vocab_file
        __lowercase= False if not self.vocab_file else True
        __lowercase= extra_ids

    @staticmethod
    def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            __lowercase= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.' ,
                    lowerCAmelCase ,
                )

        return max_model_length

    def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        __lowercase= os.path.join(
            lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ):
            copyfile(self.vocab_file , lowerCAmelCase )
            logger.info(f'Copy vocab file to {out_vocab_file}' )

        return (out_vocab_file,)

    def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
        __lowercase= token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a
        else:
            __lowercase= token_ids_a + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a

    def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
        __lowercase= [self.eos_token_id]

        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]

    def _A (self ):
        return list(
            set(filter(lambda lowerCAmelCase : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) )
        )

    def _A (self ):
        return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
295
0
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class _snake_case ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ ( self ):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        a :Tuple = [[1, 2, 4], [1, 2, 3, 4]]
        a :str = DisjunctiveConstraint(_lowerCamelCase )
        self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) )

        with self.assertRaises(_lowerCamelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(_lowerCamelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        a :Optional[int] = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(_lowerCamelCase ):
            DisjunctiveConstraint(_lowerCamelCase )  # fails here

    def SCREAMING_SNAKE_CASE__ ( self ):
        a :Tuple = [[1, 2, 3], [1, 2, 4]]

        a :Dict = DisjunctiveConstraint(_lowerCamelCase )

        a , a , a :Union[str, Any] = dc.update(1 )
        a :Optional[Any] = stepped is True and completed is False and reset is False
        self.assertTrue(_lowerCamelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        a , a , a :Optional[Any] = dc.update(2 )
        a :Union[str, Any] = stepped is True and completed is False and reset is False
        self.assertTrue(_lowerCamelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        a , a , a :Union[str, Any] = dc.update(3 )
        a :Optional[int] = stepped is True and completed is True and reset is False
        self.assertTrue(_lowerCamelCase )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def SCREAMING_SNAKE_CASE__ ( self ):
        a :List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        a :List[str] = DisjunctiveConstraint(_lowerCamelCase )

        a , a , a :Tuple = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        a , a , a :Dict = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        a , a , a :Dict = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        a , a , a :Optional[int] = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        a , a , a :List[str] = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        a , a , a :Dict = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        a , a , a :List[Any] = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
94
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    '''simple docstring'''
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F'{max_subarray_sum(nums) = }')
295
0
def validate_initial_digits(credit_card_number: str) -> bool:
    """simple docstring"""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit

    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])

    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """simple docstring"""
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.')
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f'{error_message} of its length.')
        return False

    if not validate_initial_digits(credit_card_number):
        print(f'{error_message} of its first two digits.')
        return False

    if not luhn_validation(credit_card_number):
        print(f'{error_message} it fails the Luhn check.')
        return False

    print(f'{credit_card_number} is a valid credit card number.')
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
95
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class A ( A_ , unittest.TestCase ):
    UpperCamelCase_ : Any =PriorTransformer
    UpperCamelCase_ : List[str] ='''hidden_states'''

    @property
    def _A (self ):
        __lowercase= 4
        __lowercase= 8
        __lowercase= 7

        __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase )
        __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase )
        __lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def _A (self , lowerCAmelCase=0 ):
        torch.manual_seed(lowerCAmelCase )
        __lowercase= 4
        __lowercase= 8
        __lowercase= 7

        __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
        __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
        __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def _A (self ):
        return (4, 8)

    @property
    def _A (self ):
        return (4, 8)

    def _A (self ):
        __lowercase= {
            'num_attention_heads': 2,
            'attention_head_dim': 4,
            'num_layers': 2,
            'embedding_dim': 8,
            'num_embeddings': 7,
            'additional_embeddings': 4,
        }
        __lowercase= self.dummy_input
        return init_dict, inputs_dict

    def _A (self ):
        __lowercase, __lowercase= PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase
        )
        self.assertIsNotNone(lowerCAmelCase )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )

        model.to(lowerCAmelCase )
        __lowercase= model(**self.dummy_input )[0]

        assert hidden_states is not None, "Make sure output is not None"

    def _A (self ):
        __lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common()
        __lowercase= self.model_class(**lowerCAmelCase )
        __lowercase= inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        __lowercase= [*signature.parameters.keys()]

        __lowercase= ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2] , lowerCAmelCase )

    def _A (self ):
        __lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
        __lowercase= model.to(lowerCAmelCase )

        if hasattr(lowerCAmelCase , 'set_default_attn_processor' ):
            model.set_default_attn_processor()

        __lowercase= self.get_dummy_seed_input()
        with torch.no_grad():
            __lowercase= model(**lowerCAmelCase )[0]

        __lowercase= output[0, :5].flatten().cpu()
        print(lowerCAmelCase )

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        __lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
        self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) )


@slow
class A ( unittest.TestCase ):
    def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ):
        torch.manual_seed(lowerCAmelCase )
        __lowercase= batch_size
        __lowercase= embedding_dim
        __lowercase= num_embeddings

        __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
        __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
        __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def _A (self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
            [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
            # fmt: on
        ]
    )
    def _A (self , lowerCAmelCase , lowerCAmelCase ):
        __lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
        model.to(lowerCAmelCase )
        __lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase )

        with torch.no_grad():
            __lowercase= model(**lowerCAmelCase )[0]

        assert list(sample.shape ) == [1, 7_6_8]

        __lowercase= sample[0, :8].flatten().cpu()
        print(lowerCAmelCase )
        __lowercase= torch.tensor(lowerCAmelCase )

        assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
295
0
"""simple docstring""" import numpy as np def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : List[str] = int(np.ceil((x_end - xa) / h ) ) _lowerCamelCase : Any = np.zeros((n + 1,) ) _lowerCamelCase : Optional[int] = ya _lowerCamelCase : Dict = xa for k in range(lowercase__ ): _lowerCamelCase : Any = f(lowercase__ , y[k] ) _lowerCamelCase : str = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) _lowerCamelCase : Tuple = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) _lowerCamelCase : Optional[Any] = f(x + h , y[k] + h * ka ) _lowerCamelCase : Tuple = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
96
def counting_sort(collection):
    '''simple docstring'''
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    '''simple docstring'''
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"

    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(counting_sort(unsorted))
295
0
'''simple docstring'''
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def a ( __a , __a , __a=1e-12 ) -> str:
    '''simple docstring'''
    UpperCamelCase__ :Optional[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T
    UpperCamelCase__ :Optional[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T
    return jnp.matmul(__a , norm_emb_a.T )


class lowercase ( nn.Module ):
    """simple docstring"""
    _a = 42
    _a = jnp.floataa

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Optional[int] = FlaxCLIPVisionModule(self.config.vision_config )
        UpperCamelCase__ :Tuple = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase_ , dtype=self.dtype )

        UpperCamelCase__ :Optional[Any] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        UpperCamelCase__ :Optional[Any] = self.param(
            '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim)
        )

        UpperCamelCase__ :Tuple = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
        UpperCamelCase__ :List[str] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )

    def __call__( self , UpperCamelCase_ ):
        '''simple docstring'''
        UpperCamelCase__ :List[str] = self.vision_model(UpperCamelCase_ )[1]
        UpperCamelCase__ :Optional[int] = self.visual_projection(UpperCamelCase_ )

        UpperCamelCase__ :List[Any] = jax_cosine_distance(UpperCamelCase_ , self.special_care_embeds )
        UpperCamelCase__ :List[str] = jax_cosine_distance(UpperCamelCase_ , self.concept_embeds )

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        UpperCamelCase__ :int = 0.0

        UpperCamelCase__ :int = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        UpperCamelCase__ :Optional[int] = jnp.round(UpperCamelCase_ , 3 )
        UpperCamelCase__ :Union[str, Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase_ )
        # Use a lower threshold if an image has any special care concept
        UpperCamelCase__ :Union[str, Any] = is_special_care * 0.01

        UpperCamelCase__ :List[str] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        UpperCamelCase__ :Optional[int] = jnp.round(UpperCamelCase_ , 3 )
        UpperCamelCase__ :int = jnp.any(concept_scores > 0 , axis=1 )

        return has_nsfw_concepts


class lowercase ( A__ ):
    """simple docstring"""
    _a = CLIPConfig
    _a = 'clip_input'
    _a = FlaxStableDiffusionSafetyCheckerModule

    def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = jnp.floataa , UpperCamelCase_ = True , **UpperCamelCase_ , ):
        '''simple docstring'''
        if input_shape is None:
            UpperCamelCase__ :Union[str, Any] = (1, 224, 224, 3)
        UpperCamelCase__ :Dict = self.module_class(config=UpperCamelCase_ , dtype=UpperCamelCase_ , **UpperCamelCase_ )
        super().__init__(UpperCamelCase_ , UpperCamelCase_ , input_shape=UpperCamelCase_ , seed=UpperCamelCase_ , dtype=UpperCamelCase_ , _do_init=_do_init )

    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None ):
        '''simple docstring'''
        UpperCamelCase__ :Dict = jax.random.normal(UpperCamelCase_ , UpperCamelCase_ )

        UpperCamelCase__ , UpperCamelCase__ :List[Any] = jax.random.split(UpperCamelCase_ )
        UpperCamelCase__ :Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}

        UpperCamelCase__ :List[str] = self.module.init(UpperCamelCase_ , UpperCamelCase_ )['''params''']

        return random_params

    def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , ):
        '''simple docstring'''
        UpperCamelCase__ :Any = jnp.transpose(UpperCamelCase_ , (0, 2, 3, 1) )

        return self.module.apply(
            {'''params''': params or self.params} ,
            jnp.array(UpperCamelCase_ , dtype=jnp.floataa ) ,
            rngs={} ,
        )
97
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class A ( A_ ):
    def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
        __lowercase= parent
        __lowercase= batch_size
        __lowercase= seq_length
        __lowercase= is_training
        __lowercase= use_input_mask
        __lowercase= use_token_type_ids
        __lowercase= use_labels
        __lowercase= vocab_size
        __lowercase= hidden_size
        __lowercase= num_hidden_layers
        __lowercase= num_attention_heads
        __lowercase= intermediate_size
        __lowercase= hidden_act
        __lowercase= hidden_dropout_prob
        __lowercase= attention_probs_dropout_prob
        __lowercase= max_position_embeddings
        __lowercase= type_vocab_size
        __lowercase= type_sequence_label_size
        __lowercase= initializer_range
        __lowercase= num_labels
        __lowercase= num_choices
        __lowercase= scope

    def _A (self ):
        __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __lowercase= None
        if self.use_input_mask:
            __lowercase= random_attention_mask([self.batch_size, self.seq_length] )

        __lowercase= None
        __lowercase= None
        __lowercase= None
        if self.use_labels:
            __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowercase= ids_tensor([self.batch_size] , self.num_choices )

        __lowercase= self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _A (self ):
        return DistilBertConfig(
            vocab_size=self.vocab_size ,
            dim=self.hidden_size ,
            n_layers=self.num_hidden_layers ,
            n_heads=self.num_attention_heads ,
            hidden_dim=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            initializer_range=self.initializer_range ,
        )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        __lowercase= DistilBertModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        __lowercase= model(lowerCAmelCase , lowerCAmelCase )
        __lowercase= model(lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        __lowercase= DistilBertForMaskedLM(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        __lowercase= DistilBertForQuestionAnswering(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        __lowercase= model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase
        )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        __lowercase= self.num_labels
        __lowercase= DistilBertForSequenceClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        __lowercase= self.num_labels
        __lowercase= DistilBertForTokenClassification(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()

        __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        __lowercase= self.num_choices
        __lowercase= DistilBertForMultipleChoice(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowercase= model(
            lowerCAmelCase ,
            attention_mask=lowerCAmelCase ,
            labels=lowerCAmelCase ,
        )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _A (self ):
        __lowercase= self.prepare_config_and_inputs()
        ((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= config_and_inputs
        __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class A ( A_ , A_ , unittest.TestCase ):
    UpperCamelCase_ : Any =(
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    UpperCamelCase_ : Optional[int] =(
        {
            '''feature-extraction''': DistilBertModel,
            '''fill-mask''': DistilBertForMaskedLM,
            '''question-answering''': DistilBertForQuestionAnswering,
            '''text-classification''': DistilBertForSequenceClassification,
            '''token-classification''': DistilBertForTokenClassification,
            '''zero-shot''': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase_ : str =True
    UpperCamelCase_ : str =True
    UpperCamelCase_ : Union[str, Any] =True
    UpperCamelCase_ : Optional[int] =True

    def _A (self ):
        __lowercase= DistilBertModelTester(self )
        __lowercase= ConfigTester(self , config_class=lowerCAmelCase , dim=3_7 )

    def _A (self ):
        self.config_tester.run_common_tests()

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase )

    @slow
    def _A (self ):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase= DistilBertModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )

    @slow
    @require_torch_gpu
    def _A (self ):
        __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            __lowercase= True
            __lowercase= model_class(config=lowerCAmelCase )
            __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
            __lowercase= torch.jit.trace(
                lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' ))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowerCAmelCase , os.path.join(lowerCAmelCase , 'traced_model.pt' ) )
                __lowercase= torch.jit.load(os.path.join(lowerCAmelCase , 'traced_model.pt' ) , map_location=lowerCAmelCase )
                loaded(inputs_dict['input_ids'].to(lowerCAmelCase ) , inputs_dict['attention_mask'].to(lowerCAmelCase ) )


@require_torch
class A ( unittest.TestCase ):
    @slow
    def _A (self ):
        __lowercase= DistilBertModel.from_pretrained('distilbert-base-uncased' )
        __lowercase= torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        __lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )

        with torch.no_grad():
            __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
        __lowercase= torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , lowerCAmelCase )
        __lowercase= torch.tensor(
            [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
295
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class snake_case ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" snake_case__ = KandinskyVaaPipeline snake_case__ = [ "image_embeds", "negative_image_embeds", ] snake_case__ = ["image_embeds", "negative_image_embeds"] snake_case__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] snake_case__ = False @property def __lowerCAmelCase ( self : int ): return 32 @property def __lowerCAmelCase ( self : Optional[int] ): return 32 @property def __lowerCAmelCase ( self : Tuple ): return self.time_input_dim @property def __lowerCAmelCase ( self : Dict ): return self.time_input_dim * 4 @property def __lowerCAmelCase ( self : Union[str, Any] ): return 100 @property def __lowerCAmelCase ( self : Union[str, Any] ): torch.manual_seed(0 ) UpperCAmelCase__ = { 'in_channels': 4, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } UpperCAmelCase__ = UNetaDConditionModel(**lowerCamelCase__ ) return model @property def __lowerCAmelCase ( self : Any ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self : Dict ): torch.manual_seed(0 ) UpperCAmelCase__ = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self : Union[str, Any] ): UpperCAmelCase__ = self.dummy_unet UpperCAmelCase__ = self.dummy_movq UpperCAmelCase__ = DDIMScheduler( num_train_timesteps=1_000 ,beta_schedule='linear' ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=lowerCamelCase__ ,) UpperCAmelCase__ = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Dict=0 ): UpperCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) UpperCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( lowerCamelCase__ ) if str(lowerCamelCase__ ).startswith('mps' ): 
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ ) else: UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) UpperCAmelCase__ = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def __lowerCAmelCase ( self : List[Any] ): UpperCAmelCase__ = 'cpu' UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ ) UpperCAmelCase__ = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) ) UpperCAmelCase__ = output.images UpperCAmelCase__ = pipe( **self.get_dummy_inputs(lowerCamelCase__ ) ,return_dict=lowerCamelCase__ ,)[0] UpperCAmelCase__ = image[0, -3:, -3:, -1] UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ = np.array( [0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class snake_case ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' ) UpperCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa ) pipe_prior.to(lowerCamelCase__ ) UpperCAmelCase__ = KandinskyVaaPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder' ,torch_dtype=torch.floataa ) UpperCAmelCase__ = pipeline.to(lowerCamelCase__ ) pipeline.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = 'red cat, 4k photo' UpperCAmelCase__ = torch.Generator(device='cuda' ).manual_seed(0 ) UpperCAmelCase__ , UpperCAmelCase__ = pipe_prior( lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple() UpperCAmelCase__ = torch.Generator(device='cuda' ).manual_seed(0 ) UpperCAmelCase__ = pipeline( image_embeds=lowerCamelCase__ ,negative_image_embeds=lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=100 ,output_type='np' ,) UpperCAmelCase__ = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
98
def bfs(graph, s, t, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 1_6, 1_3, 0, 0, 0],
    [0, 0, 1_0, 1_2, 0, 0],
    [0, 4, 0, 0, 1_4, 0],
    [0, 0, 9, 0, 0, 2_0],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
295
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowercase : List[str] = {
    """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
    """tokenization_electra""": ["""ElectraTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Any = ["""ElectraTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : List[str] = [
        """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ElectraForCausalLM""",
        """ElectraForMaskedLM""",
        """ElectraForMultipleChoice""",
        """ElectraForPreTraining""",
        """ElectraForQuestionAnswering""",
        """ElectraForSequenceClassification""",
        """ElectraForTokenClassification""",
        """ElectraModel""",
        """ElectraPreTrainedModel""",
        """load_tf_weights_in_electra""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Optional[int] = [
        """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFElectraForMaskedLM""",
        """TFElectraForMultipleChoice""",
        """TFElectraForPreTraining""",
        """TFElectraForQuestionAnswering""",
        """TFElectraForSequenceClassification""",
        """TFElectraForTokenClassification""",
        """TFElectraModel""",
        """TFElectraPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : List[Any] = [
        """FlaxElectraForCausalLM""",
        """FlaxElectraForMaskedLM""",
        """FlaxElectraForMultipleChoice""",
        """FlaxElectraForPreTraining""",
        """FlaxElectraForQuestionAnswering""",
        """FlaxElectraForSequenceClassification""",
        """FlaxElectraForTokenClassification""",
        """FlaxElectraModel""",
        """FlaxElectraPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    lowercase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
99
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    '''simple docstring'''
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    '''simple docstring'''
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert kmp(pattern, text)

    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert kmp(pattern, text)

    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert kmp(pattern, text)

    # Test 5)
    pattern = '''aabaabaaa'''
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
295
0
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , ): super().__init__() self.register_modules(transformer=lowerCAmelCase__ , vae=lowerCAmelCase__ , scheduler=lowerCAmelCase__) # create a imagenet -> id dictionary for easier use __SCREAMING_SNAKE_CASE = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(""","""): __SCREAMING_SNAKE_CASE = int(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = dict(sorted(self.labels.items())) def snake_case_ ( self , lowerCAmelCase__): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = list(lowerCAmelCase__) for l in label: if l not in self.labels: raise ValueError( f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.") return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 5_0 , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ): __SCREAMING_SNAKE_CASE = len(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.transformer.config.sample_size __SCREAMING_SNAKE_CASE = self.transformer.config.in_channels __SCREAMING_SNAKE_CASE = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase__ , device=self.device , dtype=self.transformer.dtype , ) __SCREAMING_SNAKE_CASE = torch.cat([latents] * 2) if guidance_scale > 1 else latents __SCREAMING_SNAKE_CASE = torch.tensor(lowerCAmelCase__ , device=self.device).reshape(-1) __SCREAMING_SNAKE_CASE = torch.tensor([1_0_0_0] * batch_size , device=self.device) __SCREAMING_SNAKE_CASE = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowerCAmelCase__) for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale > 1: __SCREAMING_SNAKE_CASE = latent_model_input[: len(lowerCAmelCase__) // 2] __SCREAMING_SNAKE_CASE = torch.cat([half, half] , dim=0) __SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = t if not torch.is_tensor(lowerCAmelCase__): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) __SCREAMING_SNAKE_CASE = latent_model_input.device.type == """mps""" if isinstance(lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = torch.floataa if is_mps else torch.floataa else: __SCREAMING_SNAKE_CASE = torch.intaa if is_mps else torch.intaa __SCREAMING_SNAKE_CASE = torch.tensor([timesteps] , dtype=lowerCAmelCase__ , device=latent_model_input.device) elif len(timesteps.shape) == 0: __SCREAMING_SNAKE_CASE = timesteps[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __SCREAMING_SNAKE_CASE = timesteps.expand(latent_model_input.shape[0]) # predict noise model_output __SCREAMING_SNAKE_CASE = self.transformer( lowerCAmelCase__ , timestep=lowerCAmelCase__ , class_labels=lowerCAmelCase__).sample # perform guidance if guidance_scale > 1: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = torch.split(lowerCAmelCase__ , len(lowerCAmelCase__) // 2 , dim=0) __SCREAMING_SNAKE_CASE = uncond_eps + guidance_scale * (cond_eps - uncond_eps) __SCREAMING_SNAKE_CASE = torch.cat([half_eps, half_eps] , dim=0) __SCREAMING_SNAKE_CASE = torch.cat([eps, rest] , dim=1) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = torch.split(lowerCAmelCase__ , lowerCAmelCase__ , dim=1) else: __SCREAMING_SNAKE_CASE = noise_pred # compute previous image: x_t -> x_t-1 __SCREAMING_SNAKE_CASE = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__).prev_sample if guidance_scale > 1: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = latent_model_input.chunk(2 , dim=0) else: __SCREAMING_SNAKE_CASE = latent_model_input __SCREAMING_SNAKE_CASE = 1 / self.vae.config.scaling_factor * latents __SCREAMING_SNAKE_CASE = self.vae.decode(lowerCAmelCase__).sample __SCREAMING_SNAKE_CASE = (samples / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __SCREAMING_SNAKE_CASE = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": __SCREAMING_SNAKE_CASE = self.numpy_to_pil(lowerCAmelCase__) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowerCAmelCase__)
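# A hedged usage sketch for the pipeline above; the checkpoint id
# "facebook/DiT-XL-2-256" is an assumption (any DiT checkpoint with a
# transformer/vae/scheduler and an id2label mapping should work).
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#     class_ids = pipe.get_label_ids(["golden retriever"])  # label must exist in pipe.labels
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]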
100
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
295
0
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
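# A tiny demonstration of the node-sum iterator above (safe to append, it only
# uses the classes defined there):
demo_root = Node(10)
demo_root.left, demo_root.right = Node(5), Node(-3)
assert next(iter(BinaryTreeNodeSum(demo_root))) == 12  # 10 + 5 - 3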
101
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give XLNet and Transformer-XL more context for short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a
    vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision
    of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a
    bishop, begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(
                    prefix=prefix, **self._forward_params
                )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
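# A hedged usage sketch for the pipeline above, via the high-level factory;
# "gpt2" is just a convenient example checkpoint.
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     out = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
#     print(out[0]["generated_text"])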
295
0
"""simple docstring""" import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=3 , a_=True , a_=True , a_=0.1 , a_=0.1 , a_=2_24 , a_=10_00 , a_=[3, 3, 6, 4] , a_=[48, 56, 1_12, 2_20] , ): '''simple docstring''' __snake_case : Any = parent __snake_case : Tuple = batch_size __snake_case : Dict = num_channels __snake_case : Dict = is_training __snake_case : List[Any] = use_labels __snake_case : Dict = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : Optional[int] = num_labels __snake_case : str = image_size __snake_case : List[str] = layer_depths __snake_case : Dict = embed_dims def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Optional[int] = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=a_ , layer_scale_init_value=1E-5 , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : str = SwiftFormerModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Dict = self.num_labels __snake_case : Tuple = SwiftFormerForImageClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) __snake_case : Optional[Any] = SwiftFormerForImageClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : List[str] = model(a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' ((__snake_case) , (__snake_case) , (__snake_case)) : Optional[int] = self.prepare_config_and_inputs() __snake_case : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' 
lowerCamelCase__ =(SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () lowerCamelCase__ =( {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = SwiftFormerModelTester(self ) __snake_case : Tuple = ConfigTester( self , config_class=a_ , has_text_modality=a_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : int = model_class(a_ ) __snake_case : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = model_class(a_ ) __snake_case : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : List[str] = [*signature.parameters.keys()] __snake_case : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = SwiftFormerModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @unittest.skip(reason='''SwiftFormer does not output attentions''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' def check_hidden_states_output(a_ , a_ , a_ ): __snake_case : List[str] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(a_ , a_ ) ) __snake_case : Optional[Any] = outputs.hidden_states __snake_case : Union[str, Any] = 8 self.assertEqual(len(a_ ) , a_ ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(a_ ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Any = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states 
also work using config del inputs_dict["output_hidden_states"] __snake_case : int = True check_hidden_states_output(a_ , a_ , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' def _config_zero_init(a_ ): __snake_case : Optional[Any] = copy.deepcopy(a_ ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(a_ , a_ , 1E-10 ) if isinstance(getattr(a_ , a_ , a_ ) , a_ ): __snake_case : Union[str, Any] = _config_zero_init(getattr(a_ , a_ ) ) setattr(a_ , a_ , a_ ) return configs_no_init __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = _config_zero_init(a_ ) for model_class in self.all_model_classes: __snake_case : Dict = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def lowercase ( ) ->List[Any]: """simple docstring""" __snake_case : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(a_ ) __snake_case : Optional[int] = self.default_image_processor __snake_case : Dict = prepare_img() __snake_case : Tuple = image_processor(images=a_ , return_tensors='''pt''' ).to(a_ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**a_ ) # verify the logits __snake_case : int = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , a_ ) __snake_case : List[Any] = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
102
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
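# A minimal round-trip sketch for the model above (random weights; the tiny
# shapes are assumptions consistent with the defaults, not a trained setup):
#
#     import torch
#     model = VQModel(block_out_channels=(32,), norm_num_groups=32)
#     x = torch.randn(1, 3, 32, 32)
#     out = model(x).sample        # encode -> quantize -> decode
#     assert out.shape == x.shape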
295
0
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
103
295
0
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    # Convert the message to a bit string, then add padding and length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
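# Sanity check against the standard library: the MD5 of the same message must
# agree byte-for-byte (hashlib is used only as a reference here).
import hashlib

assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")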
104
import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
295
0
"""simple docstring""" import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : @staticmethod def __a ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]: pass @is_pipeline_test @require_vision class __UpperCamelCase ( unittest.TestCase ): @require_torch def __a ( self ) -> Dict: a : List[Any] = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) a : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) a : List[str] = image_classifier(lowerCAmelCase__ , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(lowerCAmelCase__ ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) a : Optional[int] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], ] , ) @require_tf def __a ( self ) -> int: a : Tuple = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) a : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) a : int = image_classifier(lowerCAmelCase__ , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) a : List[str] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], [ {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": 
ANY(lowerCAmelCase__ )}, {"score": 0.333, "label": ANY(lowerCAmelCase__ )}, ], ] , ) @slow @require_torch def __a ( self ) -> Union[str, Any]: a : Optional[int] = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes a : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) a : str = image_classifier(lowerCAmelCase__ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) a : List[str] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def __a ( self ) -> Optional[Any]: a : List[str] = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes a : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) a : Optional[Any] = image_classifier(lowerCAmelCase__ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) a : Optional[int] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
105
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks the order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
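# Quick illustrative run of the recursive sort above (traceable by hand:
# each pass bubbles out-of-order neighbours rightward):
demo_numbers = [5, 2, 4, 1]
rec_insertion_sort(demo_numbers, len(demo_numbers))
assert demo_numbers == [1, 2, 4, 5]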
295
0
"""simple docstring""" from __future__ import annotations import math def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ ): if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if not scores: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , A_ , A_ , A_ ) , minimax(depth + 1 , node_index * 2 + 1 , A_ , A_ , A_ ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , A_ , A_ , A_ ) , minimax(depth + 1 , node_index * 2 + 1 , A_ , A_ , A_ ) , ) ) def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : Any = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] lowerCAmelCase__ : Tuple = math.log(len(A_ ) , 2 ) print(f'Optimal value : {minimax(0 , 0 , A_ , A_ , A_ )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
106
def split(string: str, separator: str = " ") -> list:
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])

    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
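# Behaviour of the helper above (it mirrors str.split for single-character
# separators, except that a trailing separator yields no empty tail):
assert split("apple#banana#cherry", separator="#") == ["apple", "banana", "cherry"]
assert split("Hello there") == ["Hello", "there"]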
295
0
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
107
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=np.int_,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, np.int_)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
295
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {} class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : str ="llama" a : List[str] =["past_key_values"] def __init__( self , snake_case__=32_000 , snake_case__=4_096 , snake_case__=11_008 , snake_case__=32 , snake_case__=32 , snake_case__=None , snake_case__="silu" , snake_case__=2_048 , snake_case__=0.02 , snake_case__=1e-6 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=1 , snake_case__=False , snake_case__=None , **snake_case__ , ): """simple docstring""" lowerCAmelCase : Optional[Any] = vocab_size lowerCAmelCase : str = max_position_embeddings lowerCAmelCase : str = hidden_size lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : List[str] = num_attention_heads # for backward compatibility if num_key_value_heads is None: lowerCAmelCase : Tuple = num_attention_heads lowerCAmelCase : Dict = num_key_value_heads lowerCAmelCase : Optional[Any] = hidden_act lowerCAmelCase : Optional[Any] = initializer_range lowerCAmelCase : Any = rms_norm_eps lowerCAmelCase : List[Any] = pretraining_tp lowerCAmelCase : int = use_cache lowerCAmelCase : List[str] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , ) def lowercase__ ( self ): """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , snake_case__ ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f"""got {self.rope_scaling}""" ) lowerCAmelCase : Optional[Any] = self.rope_scaling.get("type" , snake_case__ ) lowerCAmelCase : int = self.rope_scaling.get("factor" , snake_case__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(snake_case__ , snake_case__ ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
108
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several
            passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
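The reader tokenizer above selects answer spans by scoring every (start, end) pair within `max_answer_length` and greedily keeping the best non-overlapping ones. A compact sketch of that selection step on plain Python lists; the logit values are made up for illustration:

def best_spans(start_logits, end_logits, max_answer_length=10, top_spans=4):
    # score every candidate span no longer than max_answer_length
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (s, e), _ in scores:
        # drop any span that overlaps one already kept
        if any(s <= cs <= ce <= e or cs <= s <= e <= ce for cs, ce in chosen):
            continue
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], top_spans=2))  # [(1, 2), (0, 0)]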
295
0
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging A: Union[str, Any] = logging.get_logger(__name__) A: str = { "huggingface/informer-tourism-monthly": ( "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json" ), # See all Informer models at https://huggingface.co/models?filter=informer } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Any = 'informer' __lowerCAmelCase : str = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "student_t" , _SCREAMING_SNAKE_CASE = "nll" , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "mean" , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 64 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 0.05 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE = "prob" , _SCREAMING_SNAKE_CASE = 5 , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[Any] = prediction_length UpperCAmelCase : Any = context_length or prediction_length UpperCAmelCase : Tuple = distribution_output UpperCAmelCase : Optional[int] = loss UpperCAmelCase : int = input_size UpperCAmelCase : Union[str, Any] = num_time_features UpperCAmelCase : Dict = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] UpperCAmelCase : Optional[int] = scaling UpperCAmelCase : Tuple = num_dynamic_real_features UpperCAmelCase : List[Any] = num_static_real_features UpperCAmelCase : Any = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) UpperCAmelCase : List[str] = cardinality else: UpperCAmelCase : List[str] = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) UpperCAmelCase : int = embedding_dimension else: UpperCAmelCase : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] UpperCAmelCase : Optional[Any] = num_parallel_samples # Transformer architecture configuration UpperCAmelCase : int = input_size * len(self.lags_sequence ) + self._number_of_features UpperCAmelCase : Any = d_model UpperCAmelCase : List[Any] = encoder_attention_heads UpperCAmelCase : Optional[Any] = decoder_attention_heads UpperCAmelCase : int = encoder_ffn_dim UpperCAmelCase : Any = decoder_ffn_dim UpperCAmelCase : str = encoder_layers UpperCAmelCase : List[str] = decoder_layers UpperCAmelCase 
: List[Any] = dropout UpperCAmelCase : Tuple = attention_dropout UpperCAmelCase : Any = activation_dropout UpperCAmelCase : List[str] = encoder_layerdrop UpperCAmelCase : List[Any] = decoder_layerdrop UpperCAmelCase : str = activation_function UpperCAmelCase : List[str] = init_std UpperCAmelCase : Any = use_cache # Informer UpperCAmelCase : Any = attention_type UpperCAmelCase : Tuple = sampling_factor UpperCAmelCase : int = distil super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
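The `_number_of_features` property above fixes the encoder input width as `input_size * len(lags_sequence)` plus the extra features. A small worked example of that arithmetic with assumed values (a univariate series, the default lags, one embedded categorical feature):

input_size = 1                          # univariate target series
lags_sequence = [1, 2, 3, 4, 5, 6, 7]   # default lags from the config above
embedding_dimension = [2]               # one static categorical feature
num_dynamic_real_features = 0
num_time_features = 4
num_static_real_features = 0

# input_size * 2 accounts for the log1p(abs(loc)) and log(scale) features
number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2
)
feature_size = input_size * len(lags_sequence) + number_of_features
print(feature_size)  # 1 * 7 + 8 = 15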
109
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A ( nn.Module ): def __init__(self ): super().__init__() __lowercase= nn.Linear(3 , 4 ) __lowercase= nn.BatchNormad(4 ) __lowercase= nn.Linear(4 , 5 ) def _A (self , lowerCAmelCase ): return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) ) class A ( A_ ): def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): return (args[0] + 1,) + args[1:], kwargs class A ( A_ ): def _A (self , lowerCAmelCase , lowerCAmelCase ): return output + 1 class A ( unittest.TestCase ): def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(test_model._hf_hook , lowerCAmelCase ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase ) self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(x + 1 ) __lowercase= test_model(x + 2 ) __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PostForwardHook() 
add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) ) self.assertTrue(outputa.requires_grad ) __lowercase= True __lowercase= test_model(lowerCAmelCase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) ) __lowercase= torch.randn(2 , 3 ).to(0 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(0 ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload __lowercase= { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
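The hook tests above depend on wrapping a module's `forward` so a hook can rewrite inputs and outputs in place. A stripped-down sketch of that wrapping idea, not accelerate's actual implementation; `add_simple_hook` is an illustrative stand-in for `add_hook_to_module`:

import torch
import torch.nn as nn

def add_simple_hook(module, pre=None, post=None):
    old_forward = module.forward          # keep a handle, as _old_forward does above
    def new_forward(*args, **kwargs):
        if pre is not None:
            args, kwargs = pre(*args, **kwargs)
        out = old_forward(*args, **kwargs)
        return post(out) if post is not None else out
    module._old_forward = old_forward
    module.forward = new_forward          # instance attribute shadows the class method

lin = nn.Linear(3, 3)
x = torch.randn(2, 3)
expected = lin(x + 1) + 1                 # what a pre(+1) plus post(+1) hook should give
add_simple_hook(lin, pre=lambda *a, **kw: ((a[0] + 1,) + a[1:], kw), post=lambda out: out + 1)
assert torch.allclose(lin(x), expected, atol=1e-5)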
295
0
'''simple docstring''' from __future__ import annotations from typing import Any class lowercase_ : """simple docstring""" def __init__( self : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Any = 0 ): __lowercase , __lowercase = row, column __lowercase = [[default_value for c in range(lowercase__ )] for r in range(lowercase__ )] def __str__( self : List[Any] ): __lowercase = F"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier __lowercase = 0 for row_vector in self.array: for obj in row_vector: __lowercase = max(lowercase__ ,len(str(lowercase__ ) ) ) __lowercase = F"%{max_element_length}s" # Make string and return def single_line(lowercase__ : Union[str, Any] ) -> str: nonlocal string_format_identifier __lowercase = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowercase__ ) for row_vector in self.array ) return s def __repr__( self : Any ): return str(self ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Optional[int] ): if not (isinstance(lowercase__ ,(list, tuple) ) and len(lowercase__ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : Union[str, Any] ,lowercase__ : Optional[int] ): assert self.validate_indicies(lowercase__ ) return self.array[loc[0]][loc[1]] def __setitem__( self : Optional[int] ,lowercase__ : Any ,lowercase__ : Union[str, Any] ): assert self.validate_indicies(lowercase__ ) __lowercase = value def __add__( self : Any ,lowercase__ : Optional[int] ): assert isinstance(lowercase__ ,lowercase__ ) assert self.row == another.row and self.column == another.column # Add __lowercase = Matrix(self.row ,self.column ) for r in range(self.row ): for c in range(self.column ): __lowercase = self[r, c] + another[r, c] return result def __neg__( self : Optional[Any] ): __lowercase = Matrix(self.row ,self.column ) for r in range(self.row ): for c in range(self.column ): __lowercase = -self[r, c] return result def __sub__( self : Optional[Any] ,lowercase__ : Dict ): return self + (-another) def __mul__( self : int ,lowercase__ : List[Any] ): if isinstance(lowercase__ ,(int, float) ): # Scalar multiplication __lowercase = Matrix(self.row ,self.column ) for r in range(self.row ): for c in range(self.column ): __lowercase = self[r, c] * another return result elif isinstance(lowercase__ ,lowercase__ ): # Matrix multiplication assert self.column == another.row __lowercase = Matrix(self.row ,another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __lowercase = F"Unsupported type given for another ({type(lowercase__ )})" raise TypeError(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = Matrix(self.column ,self.row ) for r in range(self.row ): for c in range(self.column ): __lowercase = self[r, c] return result def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ): assert isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __lowercase = v.transpose() __lowercase = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / 
numerator_factor)) # Testing if __name__ == "__main__": def _A ( ): """simple docstring""" __lowercase = Matrix(3 , 3 , 0 ) for i in range(3 ): __lowercase = 1 print(F"a^(-1) is {ainv}" ) # u, v __lowercase = Matrix(3 , 1 , 0 ) __lowercase , __lowercase , __lowercase = 1, 2, -3 __lowercase = Matrix(3 , 1 , 0 ) __lowercase , __lowercase , __lowercase = 4, -2, 5 print(F"u is {u}" ) print(F"v is {v}" ) print(F"uv^T is {u * v.transpose()}" ) # Sherman Morrison print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase__ , lowercase__ )}" ) def _A ( ): """simple docstring""" import doctest doctest.testmod() testa()
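The `sherman_morrison` method above applies the rank-one update identity (A + u v^T)^{-1} = A^{-1} - A^{-1} u v^T A^{-1} / (1 + v^T A^{-1} u), returning None when the denominator vanishes. A quick NumPy check of that identity; NumPy is used here only for verification, the record itself is pure Python:

import numpy as np

rng = np.random.default_rng(0)
a = np.eye(3) + 0.1 * rng.standard_normal((3, 3))
u = rng.standard_normal((3, 1))
v = rng.standard_normal((3, 1))

a_inv = np.linalg.inv(a)
denom = 1.0 + (v.T @ a_inv @ u).item()            # the record's numerator_factor
updated = a_inv - (a_inv @ u @ v.T @ a_inv) / denom
assert np.allclose(updated, np.linalg.inv(a + u @ v.T))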
104
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class A ( unittest.TestCase ): def _A (self ): __lowercase= logging.get_logger() # the current default level is logging.WARNING __lowercase= logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) def _A (self ): __lowercase= logging.get_verbosity() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase ) __lowercase= logging.log_levels[env_level_str] __lowercase= logging.get_verbosity() self.assertEqual( lowerCAmelCase , lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level __lowercase= '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __lowercase= logging.logging.getLogger() with CaptureLogger(lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def _A (self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , '' ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
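The tests above pin down the contract that the `set_verbosity_*` helpers control every `transformers.*` logger at once. A short usage sketch of that API; the logger name matches the one exercised in the tests, and `log_levels` is the mapping the tests themselves read:

from transformers.utils import logging

logging.set_verbosity_error()     # silences warnings library-wide
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
logger.warning("suppressed")      # not emitted at error verbosity

logging.set_verbosity_warning()   # warnings flow again
assert logging.get_verbosity() == logging.log_levels["warning"]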
295
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE_:Optional[int] = { """vocab_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""", }, """merges_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""", }, """tokenizer_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE_:str = { """gpt2""": 1_024, """gpt2-medium""": 1_024, """gpt2-large""": 1_024, """gpt2-xl""": 1_024, """distilgpt2""": 1_024, } class SCREAMING_SNAKE_CASE__ ( A_ ): '''simple docstring''' __lowerCamelCase : Any = VOCAB_FILES_NAMES __lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : str = ['''input_ids''', '''attention_mask'''] __lowerCamelCase : Union[str, Any] = GPTaTokenizer def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__="<|endoftext|>", lowerCamelCase__="<|endoftext|>", lowerCamelCase__="<|endoftext|>", lowerCamelCase__=False, **lowerCamelCase__, ): super().__init__( lowerCamelCase__, lowerCamelCase__, tokenizer_file=lowerCamelCase__, unk_token=lowerCamelCase__, bos_token=lowerCamelCase__, eos_token=lowerCamelCase__, add_prefix_space=lowerCamelCase__, **lowerCamelCase__, ) A : Tuple = kwargs.pop("""add_bos_token""", lowerCamelCase__ ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""", lowerCamelCase__ ) != add_prefix_space: A : str = getattr(lowerCamelCase__, pre_tok_state.pop("""type""" ) ) A : Optional[Any] = add_prefix_space A : List[str] = pre_tok_class(**lowerCamelCase__ ) A : List[str] = add_prefix_space def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ): A : str = kwargs.get("""is_split_into_words""", lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate 
{self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ): A : Tuple = kwargs.get("""is_split_into_words""", lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ): A : Dict = self._tokenizer.model.save(lowerCamelCase__, name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__ ): A : int = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__ ) + [self.eos_token_id] ) if len(lowerCamelCase__ ) > self.model_max_length: A : List[str] = input_ids[-self.model_max_length :] return input_ids
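The fast tokenizer above ends by packing a conversation: each turn is encoded, the EOS id is appended, and the sequence is left-truncated to `model_max_length` so the newest context survives. A plain-Python sketch of that packing step; `fake_encode` is a stand-in for the real tokenizer, and the constants are deliberately tiny so the truncation is visible:

EOS_ID = 50256          # GPT-2's <|endoftext|> id
MODEL_MAX_LENGTH = 8    # toy limit for the demo

def fake_encode(text):
    return [ord(c) % 100 for c in text]   # stand-in for tokenizer.encode

def build_conversation_input_ids(turns):
    input_ids = []
    for text in turns:
        input_ids.extend(fake_encode(text) + [EOS_ID])   # EOS after every turn
    if len(input_ids) > MODEL_MAX_LENGTH:
        input_ids = input_ids[-MODEL_MAX_LENGTH:]        # keep the most recent ids
    return input_ids

ids = build_conversation_input_ids(["hi", "hello there"])
assert len(ids) <= MODEL_MAX_LENGTH and ids[-1] == EOS_ID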
116
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase = '''▁''' lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''} } lowerCAmelCase = { '''google/pegasus-xsum''': 5_1_2, } lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=1_0_3 , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= offset if additional_special_tokens is not None: if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise TypeError( f'additional_special_tokens should be of type {type(lowerCAmelCase )}, but is' f' {type(lowerCAmelCase )}' ) __lowercase= ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'<unk_{i}>' for i in range(len(lowerCAmelCase ) , self.offset - 1 ) ] if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' 
) __lowercase= additional_special_tokens_extended else: __lowercase= [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )] __lowercase= {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) __lowercase= mask_token_sent __lowercase= vocab_file __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) # add special tokens to encoder dict __lowercase= { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) __lowercase= {v: k for k, v in self.encoder.items()} @property def _A (self ): return len(self.sp_model ) + self.offset def _A (self ): __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): __lowercase= self.__dict__.copy() __lowercase= None return state def __setstate__(self , lowerCAmelCase ): __lowercase= d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase= {} __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _A (self , lowerCAmelCase ): return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase ) def _A (self , lowerCAmelCase ): if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] __lowercase= self.sp_model.piece_to_id(lowerCAmelCase ) return sp_id + self.offset def _A (self , lowerCAmelCase ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: __lowercase= self.sp_model.IdToPiece(index - self.offset ) return token def _A (self , lowerCAmelCase ): __lowercase= [] __lowercase= '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token __lowercase= [] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def _A (self , lowerCAmelCase=False ): return 1 def _A (self , lowerCAmelCase ): __lowercase= set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): if already_has_special_tokens: return self._special_token_mask(lowerCAmelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A (self , lowerCAmelCase , lowerCAmelCase=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to 
process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase , 'wb' ) as fi: __lowercase= self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
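The tokenizer above reserves the low id range for special tokens and shifts every SentencePiece id up by a fixed `offset` (103 in the record). A toy sketch of that two-way mapping; the tiny vocab and token strings are invented for illustration:

OFFSET = 103                                     # ids 0..102 reserved for specials
special = {0: "<pad>", 1: "</s>", 2: "<mask_2>", 3: "<mask_1>"}
sp_vocab = {"▁hello": 0, "▁world": 1}            # pretend SentencePiece pieces

def token_to_id(token):
    reserved = {tok: idx for idx, tok in special.items()}
    if token in reserved:
        return reserved[token]
    return sp_vocab[token] + OFFSET              # shift past the reserved range

def id_to_token(idx):
    if idx in special:
        return special[idx]
    pieces = {v: k for k, v in sp_vocab.items()}
    return pieces[idx - OFFSET]

assert id_to_token(token_to_id("▁world")) == "▁world"  # round-trips through the offset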
295
0
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig lowercase_ : Dict = logging.get_logger(__name__) # General docstring lowercase_ : List[str] = 'ResNetConfig' # Base docstring lowercase_ : str = 'microsoft/resnet-50' lowercase_ : Union[str, Any] = [1, 20_48, 7, 7] # Image classification docstring lowercase_ : List[str] = 'microsoft/resnet-50' lowercase_ : Dict = 'tiger cat' lowercase_ : str = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class __lowerCAmelCase ( nn.Module ): def __init__( self : str , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : str = 3 , snake_case__ : List[str] = 1 , snake_case__ : str = "relu" ): """simple docstring""" super().__init__() _UpperCAmelCase = nn.Convad( snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=kernel_size // 2 , bias=snake_case__ ) _UpperCAmelCase = nn.BatchNormad(snake_case__ ) _UpperCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity() def UpperCamelCase ( self : Dict , snake_case__ : Tuple ): """simple docstring""" _UpperCAmelCase = self.convolution(snake_case__ ) _UpperCAmelCase = self.normalization(snake_case__ ) _UpperCAmelCase = self.activation(snake_case__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): def __init__( self : Optional[int] , snake_case__ : List[Any] ): """simple docstring""" super().__init__() _UpperCAmelCase = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _UpperCAmelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _UpperCAmelCase = config.num_channels def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict ): """simple docstring""" _UpperCAmelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) _UpperCAmelCase = self.embedder(snake_case__ ) _UpperCAmelCase = self.pooler(snake_case__ ) return embedding class __lowerCAmelCase ( nn.Module ): def __init__( self : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] = 2 ): """simple docstring""" super().__init__() _UpperCAmelCase = nn.Convad(snake_case__ , snake_case__ , kernel_size=1 , stride=snake_case__ , bias=snake_case__ ) _UpperCAmelCase = nn.BatchNormad(snake_case__ ) def UpperCamelCase ( self : Any , snake_case__ : Optional[int] ): """simple docstring""" _UpperCAmelCase = self.convolution(snake_case__ ) _UpperCAmelCase = self.normalization(snake_case__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): def __init__( self : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] = 1 , snake_case__ : Any = "relu" ): """simple docstring""" super().__init__() _UpperCAmelCase = in_channels != out_channels or stride != 1 _UpperCAmelCase = ( ResNetShortCut(snake_case__ , snake_case__ , stride=snake_case__ ) if should_apply_shortcut else nn.Identity() ) _UpperCAmelCase = nn.Sequential( ResNetConvLayer(snake_case__ , snake_case__ , stride=snake_case__ ) , ResNetConvLayer(snake_case__ , snake_case__ , activation=snake_case__ ) , ) _UpperCAmelCase = ACTaFN[activation] def UpperCamelCase ( self : Dict , snake_case__ : Optional[Any] ): """simple docstring""" _UpperCAmelCase = hidden_state _UpperCAmelCase = self.layer(snake_case__ ) _UpperCAmelCase = self.shortcut(snake_case__ ) hidden_state += residual _UpperCAmelCase = self.activation(snake_case__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): def __init__( self : Tuple , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : str = 1 , snake_case__ : int = "relu" , snake_case__ : Optional[Any] = 4 ): """simple docstring""" super().__init__() _UpperCAmelCase = in_channels != out_channels or stride != 1 _UpperCAmelCase = out_channels // reduction _UpperCAmelCase = ( ResNetShortCut(snake_case__ , snake_case__ , stride=snake_case__ ) if should_apply_shortcut else nn.Identity() ) _UpperCAmelCase = nn.Sequential( ResNetConvLayer(snake_case__ , snake_case__ , kernel_size=1 ) , ResNetConvLayer(snake_case__ , snake_case__ , stride=snake_case__ ) , ResNetConvLayer(snake_case__ , snake_case__ , kernel_size=1 , activation=snake_case__ ) , ) _UpperCAmelCase = ACTaFN[activation] def UpperCamelCase ( self : int , snake_case__ : Union[str, Any] ): """simple docstring""" _UpperCAmelCase = hidden_state _UpperCAmelCase = self.layer(snake_case__ ) _UpperCAmelCase = self.shortcut(snake_case__ ) hidden_state += residual _UpperCAmelCase = self.activation(snake_case__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Dict = 2 , snake_case__ : List[str] = 2 , ): """simple docstring""" super().__init__() _UpperCAmelCase = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer _UpperCAmelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case__ , snake_case__ , stride=snake_case__ , activation=config.hidden_act ) , *[layer(snake_case__ , snake_case__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def UpperCamelCase ( self : str , snake_case__ : List[Any] ): """simple docstring""" _UpperCAmelCase = input for layer in self.layers: _UpperCAmelCase = layer(snake_case__ ) 
return hidden_state class __lowerCAmelCase ( nn.Module ): def __init__( self : Union[str, Any] , snake_case__ : List[Any] ): """simple docstring""" super().__init__() _UpperCAmelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case__ , config.depths[1:] ): self.stages.append(ResNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ ) ) def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : str = False , snake_case__ : Union[str, Any] = True ): """simple docstring""" _UpperCAmelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCAmelCase = hidden_states + (hidden_state,) _UpperCAmelCase = stage_module(snake_case__ ) if output_hidden_states: _UpperCAmelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case__ , hidden_states=snake_case__ , ) class __lowerCAmelCase ( A_ ): snake_case_ : Any = ResNetConfig snake_case_ : Optional[int] = '''resnet''' snake_case_ : Optional[Any] = '''pixel_values''' snake_case_ : Tuple = True def UpperCamelCase ( self : Dict , snake_case__ : Any ): """simple docstring""" if isinstance(snake_case__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(snake_case__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : List[Any]=False ): """simple docstring""" if isinstance(snake_case__ , snake_case__ ): _UpperCAmelCase = value lowercase_ : int = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowercase_ : Union[str, Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare ResNet model outputting raw features without any specific head on top." 
, A_ , ) class __lowerCAmelCase ( A_ ): def __init__( self : Dict , snake_case__ : List[Any] ): """simple docstring""" super().__init__(snake_case__ ) _UpperCAmelCase = config _UpperCAmelCase = ResNetEmbeddings(snake_case__ ) _UpperCAmelCase = ResNetEncoder(snake_case__ ) _UpperCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] = None , snake_case__ : str = None ): """simple docstring""" _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.embedder(snake_case__ ) _UpperCAmelCase = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ ) _UpperCAmelCase = encoder_outputs[0] _UpperCAmelCase = self.pooler(snake_case__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , A_ , ) class __lowerCAmelCase ( A_ ): def __init__( self : Optional[Any] , snake_case__ : Optional[Any] ): """simple docstring""" super().__init__(snake_case__ ) _UpperCAmelCase = config.num_labels _UpperCAmelCase = ResNetModel(snake_case__ ) # classification head _UpperCAmelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase ( self : Tuple , snake_case__ : List[Any] = None , snake_case__ : int = None , snake_case__ : str = None , snake_case__ : List[Any] = None , ): """simple docstring""" _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.resnet(snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ ) _UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCAmelCase = self.classifier(snake_case__ ) _UpperCAmelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCAmelCase = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCAmelCase = "single_label_classification" else: _UpperCAmelCase = "multi_label_classification" if self.config.problem_type == "regression": _UpperCAmelCase = MSELoss() if self.num_labels == 1: _UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCAmelCase = loss_fct(snake_case__ , snake_case__ ) elif self.config.problem_type == "single_label_classification": _UpperCAmelCase = CrossEntropyLoss() _UpperCAmelCase = loss_fct(logits.view(-1 , 
self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCAmelCase = BCEWithLogitsLoss() _UpperCAmelCase = loss_fct(snake_case__ , snake_case__ ) if not return_dict: _UpperCAmelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states ) @add_start_docstrings( "\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , A_ , ) class __lowerCAmelCase ( A_ , A_ ): def __init__( self : Dict , snake_case__ : Optional[Any] ): """simple docstring""" super().__init__(snake_case__ ) super()._init_backbone(snake_case__ ) _UpperCAmelCase = [config.embedding_size] + config.hidden_sizes _UpperCAmelCase = ResNetEmbeddings(snake_case__ ) _UpperCAmelCase = ResNetEncoder(snake_case__ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @replace_return_docstrings(output_type=snake_case__ , config_class=_CONFIG_FOR_DOC ) def UpperCamelCase ( self : Any , snake_case__ : str , snake_case__ : Optional[int] = None , snake_case__ : Tuple = None ): """simple docstring""" _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = self.embedder(snake_case__ ) _UpperCAmelCase = self.encoder(snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _UpperCAmelCase = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case__ , )
133
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= self.vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= self.num_labels __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Tuple =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase_ : List[str] =( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= inputs_dict['labels'] __lowercase= inputs_dict['labels'] __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= OpenAIGPTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is __lowercase= [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
295
0
'''simple docstring''' import random def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : Optional[int] = False ) -> dict: """simple docstring""" A__ : int ={i: [] for i in range(lowercase__ )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(lowercase__ ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(lowercase__ ): for j in range(i + 1, lowercase__ ): if random.random() < probability: graph[i].append(lowercase__ ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(lowercase__ ) return graph def __lowerCamelCase ( __snake_case : List[str] ) -> dict: """simple docstring""" return { i: [j for j in range(lowercase__ ) if i != j] for i in range(lowercase__ ) } if __name__ == "__main__": import doctest doctest.testmod()
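Editor's note: the row above implements an Erdos-Renyi-style random graph; an edge is added whenever random.random() < probability (the inline comment in the row states this backwards). A sketch of the same construction, not part of the dataset row, with deobfuscated names of our own choosing:

import random

def random_graph(nodes: int, probability: float, directed: bool = False) -> dict:
    # adjacency lists; each unordered pair (i, j) gets an edge with the given probability
    graph = {i: [] for i in range(nodes)}
    for i in range(nodes):
        for j in range(i + 1, nodes):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:  # mirror the edge for undirected graphs
                    graph[j].append(i)
    return graph

random.seed(0)
print(random_graph(4, 0.5))  # prints the adjacency dict; the edge set depends on the seed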
134
from math import isqrt def _lowerCamelCase( lowercase__ ) -> bool: '''simple docstring''' return all(number % divisor != 0 for divisor in range(2 , isqrt(lowercase__ ) + 1 ) ) def _lowerCamelCase( lowercase__ = 1_0**6 ) -> int: '''simple docstring''' __lowercase= 0 __lowercase= 1 __lowercase= 7 while prime_candidate < max_prime: primes_count += is_prime(lowercase__ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'{solution() = }')
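Editor's sketch (ours, not part of the row): the candidate sequence 7, 19, 37, 61, ... walked by the loop above is exactly the sequence of gaps between consecutive cubes, (n + 1)**3 - n**3 = 3n^2 + 3n + 1, which the `prime_candidate += 6 * cube_index` update reproduces:

candidate, cube_index = 7, 1
for n in range(1, 6):
    assert candidate == (n + 1) ** 3 - n ** 3  # gap between consecutive cubes
    cube_index += 1
    candidate += 6 * cube_index  # gaps grow by 6 * (n + 1) each step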
295
0
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : int ,lowercase_ : Any ,lowercase_ : int=9_9 ,lowercase_ : Optional[Any]=1_3 ,lowercase_ : str=1_6 ,lowercase_ : List[str]=7 ,lowercase_ : int=True ,lowercase_ : str=True ,lowercase_ : Optional[Any]=True ,lowercase_ : Any=False ,lowercase_ : Optional[int]=True ,lowercase_ : List[Any]=2 ,lowercase_ : Optional[int]=3_2 ,lowercase_ : List[Any]=4 ,lowercase_ : str=4 ,lowercase_ : int=3_0 ,lowercase_ : Dict=0 ,lowercase_ : Dict=1 ,lowercase_ : Tuple=2 ,lowercase_ : Any=None ,): lowerCAmelCase__ : int = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Dict = decoder_seq_length # For common tests lowerCAmelCase__ : Optional[int] = self.decoder_seq_length lowerCAmelCase__ : str = is_training lowerCAmelCase__ : int = use_attention_mask lowerCAmelCase__ : int = use_labels lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : Optional[int] = d_model lowerCAmelCase__ : Any = d_model lowerCAmelCase__ : Any = decoder_layers lowerCAmelCase__ : Any = decoder_layers lowerCAmelCase__ : Dict = decoder_ffn_dim lowerCAmelCase__ : int = decoder_attention_heads lowerCAmelCase__ : Tuple = decoder_attention_heads lowerCAmelCase__ : Union[str, Any] = eos_token_id lowerCAmelCase__ : Optional[Any] = bos_token_id lowerCAmelCase__ : List[Any] = pad_token_id lowerCAmelCase__ : Dict = decoder_start_token_id lowerCAmelCase__ : Optional[Any] = use_cache lowerCAmelCase__ : List[str] = max_position_embeddings lowerCAmelCase__ : Any = None lowerCAmelCase__ : List[str] = decoder_seq_length lowerCAmelCase__ : Optional[int] = 2 lowerCAmelCase__ : int = 1 def __lowerCAmelCase ( self : Tuple ): lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size ) lowerCAmelCase__ : Dict = None if self.use_attention_mask: lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] ,vocab_size=2 ) lowerCAmelCase__ : Optional[int] = None if self.use_labels: lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size ) lowerCAmelCase__ : Any = TrOCRConfig( vocab_size=self.vocab_size ,d_model=self.d_model ,decoder_layers=self.decoder_layers ,decoder_ffn_dim=self.decoder_ffn_dim ,decoder_attention_heads=self.decoder_attention_heads ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,use_cache=self.use_cache ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,max_position_embeddings=self.max_position_embeddings ,) return (config, input_ids, attention_mask, lm_labels) def __lowerCAmelCase ( self : Tuple ,lowercase_ : str ,lowercase_ : int ,lowercase_ : List[str] ,lowercase_ : int ,): lowerCAmelCase__ : Optional[int] = True lowerCAmelCase__ : List[Any] = TrOCRDecoder(config=lowercase_ ).to(lowercase_ ).eval() lowerCAmelCase__ : int = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass lowerCAmelCase__ : List[Any] = model(lowercase_ ,use_cache=lowercase_ ) 
lowerCAmelCase__ : List[str] = model(lowercase_ ) lowerCAmelCase__ : Any = model(lowercase_ ,use_cache=lowercase_ ) self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) ) self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) + 1 ) lowerCAmelCase__ : str = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids lowerCAmelCase__ : Dict = ids_tensor((2, 1) ,config.vocab_size - 1 ) + 1 # append to next input_ids and lowerCAmelCase__ : List[str] = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowerCAmelCase__ : Union[str, Any] = model(lowercase_ )['''last_hidden_state'''] lowerCAmelCase__ : List[str] = model(lowercase_ ,past_key_values=lowercase_ )['''last_hidden_state'''] # select random slice lowerCAmelCase__ : Tuple = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowerCAmelCase__ : Optional[int] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() lowerCAmelCase__ : Tuple = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(lowercase_ ,lowercase_ ,atol=1E-3 ) def __lowerCAmelCase ( self : Union[str, Any] ): lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = config_and_inputs lowerCAmelCase__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , unittest.TestCase ): """simple docstring""" lowercase__ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowercase__ = (TrOCRForCausalLM,) if is_torch_available() else () lowercase__ = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowercase__ = True lowercase__ = False def __lowerCAmelCase ( self : Optional[int] ): lowerCAmelCase__ : List[str] = TrOCRStandaloneDecoderModelTester(self ,is_training=lowercase_ ) lowerCAmelCase__ : Tuple = ConfigTester(self ,config_class=lowercase_ ) def __lowerCAmelCase ( self : Dict ): pass def __lowerCAmelCase ( self : Any ): pass def __lowerCAmelCase ( self : str ): pass def __lowerCAmelCase ( self : Optional[Any] ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Union[str, Any] ): lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*lowercase_ ) def __lowerCAmelCase ( self : Union[str, Any] ): return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __lowerCAmelCase ( self : int ): pass
106
from __future__ import annotations def _lowerCamelCase( lowercase__ ) -> list[int]: '''simple docstring''' __lowercase= 2 __lowercase= [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(lowercase__ ) if n > 1: factors.append(lowercase__ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
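Editor's sketch with readable names (our own, not part of the row): the function above is plain trial division, appending each prime divisor as many times as it divides n.

def prime_factors(n: int) -> list[int]:
    factors, i = [], 2
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:  # whatever remains above the square root is itself prime
        factors.append(n)
    return factors

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]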
295
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json", "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json", "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json", "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json", "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json", "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json", "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json", "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json", "bert-large-uncased-whole-word-masking": ( "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json" ), "bert-large-cased-whole-word-masking": ( "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json" ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json" ), "bert-large-cased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json" ), "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json", "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json", "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json", "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json", "cl-tohoku/bert-base-japanese-whole-word-masking": ( "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json" ), "cl-tohoku/bert-base-japanese-char": ( "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json" ), "cl-tohoku/bert-base-japanese-char-whole-word-masking": ( "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json" ), "TurkuNLP/bert-base-finnish-cased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json" ), "TurkuNLP/bert-base-finnish-uncased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json" ), "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json", # See all BERT models at https://huggingface.co/models?filter=bert } class a_ ( A_ ): """simple docstring""" __UpperCAmelCase = '''bert''' def __init__( self : List[Any] ,snake_case : Optional[Any]=30522 ,snake_case : List[str]=768 ,snake_case : int=12 ,snake_case : str=12 ,snake_case : int=3072 ,snake_case : Optional[Any]="gelu" ,snake_case : Optional[Any]=0.1 ,snake_case : Optional[int]=0.1 ,snake_case : Union[str, Any]=512 ,snake_case : Union[str, Any]=2 ,snake_case : str=0.02 ,snake_case : Any=1e-12 ,snake_case : Optional[int]=0 ,snake_case : Tuple="absolute" ,snake_case : Optional[Any]=True ,snake_case : List[Any]=None ,**snake_case : int ,): 
super().__init__(pad_token_id=snake_case ,**snake_case ) SCREAMING_SNAKE_CASE =vocab_size SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =hidden_dropout_prob SCREAMING_SNAKE_CASE =attention_probs_dropout_prob SCREAMING_SNAKE_CASE =max_position_embeddings SCREAMING_SNAKE_CASE =type_vocab_size SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =layer_norm_eps SCREAMING_SNAKE_CASE =position_embedding_type SCREAMING_SNAKE_CASE =use_cache SCREAMING_SNAKE_CASE =classifier_dropout class a_ ( A_ ): """simple docstring""" @property def _lowerCAmelCase ( self : Tuple ): if self.task == "multiple-choice": SCREAMING_SNAKE_CASE ={0: 'batch', 1: 'choice', 2: 'sequence'} else: SCREAMING_SNAKE_CASE ={0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
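Editor's note: the configuration class above mirrors transformers.BertConfig. A hedged usage sketch, assuming the transformers package is installed (the default values shown are the library's, not ours):

from transformers import BertConfig

config = BertConfig(vocab_size=30522, hidden_size=768, num_hidden_layers=12)
print(config.hidden_act)  # "gelu" by default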
334
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase = None lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase = { '''t5-small''': 5_1_2, '''t5-base''': 5_1_2, '''t5-large''': 5_1_2, '''t5-3b''': 5_1_2, '''t5-11b''': 5_1_2, } class A ( A_ ): UpperCamelCase_ : Dict =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : List[str] =TaTokenizer UpperCamelCase_ : List[int] =[] def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=1_0_0 , lowerCAmelCase=None , **lowerCAmelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __lowercase= [f'<extra_id_{i}>' for i in range(lowerCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens __lowercase= len(set(filter(lambda lowerCAmelCase : bool('extra_id_' in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= vocab_file __lowercase= False if not self.vocab_file else True __lowercase= extra_ids @staticmethod def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: __lowercase= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase , ) return max_model_length def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ): copyfile(self.vocab_file , lowerCAmelCase ) logger.info(f'Copy vocab file to {out_vocab_file}' ) return (out_vocab_file,) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: __lowercase= token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _A (self ): return list( set(filter(lambda lowerCAmelCase : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def _A (self ): return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
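Editor's sketch for the sentinel-token helpers above, assuming the transformers package is installed and the t5-small checkpoint can be downloaded; the printed id is what that checkpoint's vocabulary assigns:

from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
print(tok.convert_tokens_to_ids("<extra_id_0>"))  # 32099 in t5-small's 32100-token vocabulary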
295
0
from typing import List import numpy as np def snake_case_ ( snake_case ) -> int: lowercase__: Union[str, Any] = {key: len(lowercase__ ) for key, value in gen_kwargs.items() if isinstance(lowercase__ , lowercase__ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) lowercase__: int = max(lists_lengths.values() , default=0 ) return max(1 , lowercase__ ) def snake_case_ ( snake_case , snake_case ) -> List[range]: lowercase__: Any = [] for group_idx in range(lowercase__ ): lowercase__: Tuple = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowercase__: Dict = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowercase__: Any = range(lowercase__ , start + num_shards_to_add ) shards_indices_per_group.append(lowercase__ ) return shards_indices_per_group def snake_case_ ( snake_case , snake_case ) -> List[dict]: lowercase__: List[Any] = _number_of_shards_in_gen_kwargs(lowercase__ ) if num_shards == 1: return [dict(lowercase__ )] else: lowercase__: Tuple = _distribute_shards(num_shards=lowercase__ , max_num_jobs=lowercase__ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(lowercase__ , lowercase__ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(lowercase__ ) ) ] def snake_case_ ( snake_case ) -> dict: return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , lowercase__ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def snake_case_ ( snake_case , snake_case ) -> dict: lowercase__: Union[str, Any] = {len(lowercase__ ) for value in gen_kwargs.values() if isinstance(lowercase__ , lowercase__ )} lowercase__: List[Any] = {} for size in list_sizes: lowercase__: List[Any] = list(range(lowercase__ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowercase__: str = dict(lowercase__ ) for key, value in shuffled_kwargs.items(): if isinstance(lowercase__ , lowercase__ ): lowercase__: str = [value[i] for i in indices_per_size[len(lowercase__ )]] return shuffled_kwargs
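Editor's standalone sketch (our names) of the shard-splitting rule implemented above: each of max_num_jobs groups receives num_shards // max_num_jobs shards, and the first num_shards % max_num_jobs groups receive one extra, so no group is more than one shard larger than another.

def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if to_add == 0:  # more jobs than shards: stop early
            break
        groups.append(range(start, start + to_add))
        start += to_add
    return groups

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]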
196
from collections.abc import Sequence def _lowerCamelCase( lowercase__ , lowercase__ = False ) -> float: '''simple docstring''' if not arr: return 0 __lowercase= 0 if allow_empty_subarrays else float('-inf' ) __lowercase= 0.0 for num in arr: __lowercase= max(0 if allow_empty_subarrays else num , curr_sum + num ) __lowercase= max(lowercase__ , lowercase__ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() lowerCAmelCase = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'{max_subarray_sum(nums) = }')
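Editor's check (ours) of the allow_empty_subarrays switch in the Kadane scan above: with an all-negative input, allowing the empty subarray changes the answer from the largest element to 0.

def kadane(arr, allow_empty=False):
    best = 0 if allow_empty else float("-inf")
    curr = 0.0
    for num in arr:
        curr = max(0 if allow_empty else num, curr + num)  # extend or restart the run
        best = max(best, curr)
    return best

assert kadane([-2, -1, -3]) == -1
assert kadane([-2, -1, -3], allow_empty=True) == 0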
295
0
'''simple docstring''' def a ( __a , __a ) -> List[Any]: '''simple docstring''' return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def a ( __a , __a=0 ) -> List[str]: '''simple docstring''' return sorted(lowercase__ , key=lambda __a : x[column] ) def a ( __a , __a , __a=float('''inf''' ) ) -> Optional[Any]: '''simple docstring''' for i in range(points_counts - 1 ): for j in range(i + 1 , lowercase__ ): UpperCamelCase__ :str = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: UpperCamelCase__ :Optional[int] = current_dis return min_dis def a ( __a , __a , __a=float('''inf''' ) ) -> Union[str, Any]: '''simple docstring''' for i in range(min(6 , points_counts - 1 ) , lowercase__ ): for j in range(max(0 , i - 6 ) , lowercase__ ): UpperCamelCase__ :Dict = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: UpperCamelCase__ :Dict = current_dis return min_dis def a ( __a , __a , __a ) -> Any: '''simple docstring''' if points_counts <= 3: return dis_between_closest_pair(lowercase__ , lowercase__ ) # recursion UpperCamelCase__ :List[Any] = points_counts // 2 UpperCamelCase__ :Optional[Any] = closest_pair_of_points_sqr( lowercase__ , points_sorted_on_y[:mid] , lowercase__ ) UpperCamelCase__ :Optional[int] = closest_pair_of_points_sqr( lowercase__ , points_sorted_on_y[mid:] , points_counts - mid ) UpperCamelCase__ :str = min(lowercase__ , lowercase__ ) UpperCamelCase__ :int = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(lowercase__ ) UpperCamelCase__ :Tuple = dis_between_closest_in_strip( lowercase__ , len(lowercase__ ) , lowercase__ ) return min(lowercase__ , lowercase__ ) def a ( __a , __a ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ :Optional[int] = column_based_sort(lowercase__ , column=0 ) UpperCamelCase__ :Optional[Any] = column_based_sort(lowercase__ , column=1 ) return ( closest_pair_of_points_sqr( lowercase__ , lowercase__ , lowercase__ ) ) ** 0.5 if __name__ == "__main__": __snake_case = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print('''Distance:''', closest_pair_of_points(points, len(points)))
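Editor's sanity check (ours) for the divide-and-conquer above: brute force over all pairs of the same sample points gives the closest pair (2, 3) and (3, 4) at distance sqrt(2), matching the printed result.

from itertools import combinations
from math import dist, isclose, sqrt

points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
brute = min(dist(p, q) for p, q in combinations(points, 2))  # O(n^2) reference answer
assert isclose(brute, sqrt(2))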
97
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Any =PriorTransformer UpperCamelCase_ : List[str] ='''hidden_states''' @property def _A (self ): __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def _A (self ): return (4, 8) @property def _A (self ): return (4, 8) def _A (self ): __lowercase= { 'num_attention_heads': 2, 'attention_head_dim': 4, 'num_layers': 2, 'embedding_dim': 8, 'num_embeddings': 7, 'additional_embeddings': 4, } __lowercase= self.dummy_input return init_dict, inputs_dict def _A (self ): __lowercase, __lowercase= PriorTransformer.from_pretrained( 'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(lowerCAmelCase ) __lowercase= model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def _A (self ): __lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common() __lowercase= self.model_class(**lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['hidden_states', 'timestep'] self.assertListEqual(arg_names[:2] , lowerCAmelCase ) def _A (self ): __lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' ) __lowercase= model.to(lowerCAmelCase ) if hasattr(lowerCAmelCase , 'set_default_attn_processor' ): model.set_default_attn_processor() __lowercase= self.get_dummy_seed_input() with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] __lowercase= output[0, :5].flatten().cpu() print(lowerCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] ) self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) ) @slow class A ( unittest.TestCase ): def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= batch_size __lowercase= embedding_dim __lowercase= num_embeddings __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]], [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]], # fmt: on ] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' ) model.to(lowerCAmelCase ) __lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase ) with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] assert list(sample.shape ) == [1, 7_6_8] __lowercase= sample[0, :8].flatten().cpu() print(lowerCAmelCase ) __lowercase= torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
295
0
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 _UpperCAmelCase = data_utils.TransfoXLTokenizer _UpperCAmelCase = data_utils.TransfoXLCorpus _UpperCAmelCase = data_utils _UpperCAmelCase = data_utils def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : Tuple ,__lowercase : Dict ,__lowercase : int ): '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowercase__ ,'rb' ) as fp: A_ : str = pickle.load(lowercase__ ,encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) A_ : str = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' ) A_ : str = corpus.vocab.__dict__ torch.save(lowercase__ ,lowercase__ ) A_ : List[str] = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' ,lowercase__ ) A_ : Optional[int] = pytorch_dump_folder_path + '/' + CORPUS_NAME print(f'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(lowercase__ ,lowercase__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model A_ : Any = os.path.abspath(lowercase__ ) A_ : Any = os.path.abspath(lowercase__ ) print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": A_ : Optional[Any] = TransfoXLConfig() else: A_ : str = TransfoXLConfig.from_json_file(lowercase__ ) print(f'''Building PyTorch model from configuration: {config}''' ) A_ : str = TransfoXLLMHeadModel(lowercase__ ) A_ : Any = load_tf_weights_in_transfo_xl(lowercase__ ,lowercase__ ,lowercase__ ) # Save pytorch-model A_ : Dict = os.path.join(lowercase__ ,lowercase__ ) A_ : List[str] = os.path.join(lowercase__ ,lowercase__ ) print(f'''Save PyTorch model to {os.path.abspath(lowercase__ )}''' ) torch.save(model.state_dict() ,lowercase__ ) print(f'''Save configuration file to {os.path.abspath(lowercase__ )}''' ) with open(lowercase__ ,'w' ,encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the folder to store the PyTorch model or dataset/vocab.""", ) parser.add_argument( """--tf_checkpoint_path""", default="""""", type=str, help="""An optional path to a TensorFlow checkpoint path to be converted.""", ) parser.add_argument( """--transfo_xl_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained BERT model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--transfo_xl_dataset_file""", default="""""", type=str, help="""An optional dataset file to be converted in a vocabulary.""", ) _UpperCAmelCase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
140
def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' if collection == []: return [] # get some information about the collection __lowercase= len(lowercase__ ) __lowercase= max(lowercase__ ) __lowercase= min(lowercase__ ) # create the counting array __lowercase= coll_max + 1 - coll_min __lowercase= [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowercase__ ): __lowercase= counting_arr[i] + counting_arr[i - 1] # create the output collection __lowercase= [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowercase__ ) ): __lowercase= collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt" lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip() lowerCAmelCase = [int(item) for item in user_input.split(''',''')] print(counting_sort(unsorted))
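Editor's worked trace (ours) of the prefix-sum step in the counting sort above, for collection = [3, 1, 3, 2] with coll_min = 1: raw counts are [1, 1, 2], the running sums [1, 2, 4] say how many elements are <= each value, and the right-to-left scan then places each x at index counts[x - coll_min] - 1, keeping equal keys in their original order (stability).

counts = [0, 0, 0]
for x in [3, 1, 3, 2]:
    counts[x - 1] += 1          # raw occurrence counts, offset by coll_min
for i in range(1, 3):
    counts[i] += counts[i - 1]  # prefix sums: number of elements <= each value
assert counts == [1, 2, 4]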
295
0
'''simple docstring''' from __future__ import annotations from fractions import Fraction def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> bool: return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def a__ ( lowerCAmelCase__ ) -> list[str]: UpperCAmelCase__ : List[str] = [] UpperCAmelCase__ : List[Any] = 11 UpperCAmelCase__ : List[Any] = int('''1''' + '''0''' * digit_len ) for num in range(lowercase__ , lowercase__ ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(lowercase__ , lowercase__ ): solutions.append(F"""{num}/{den}""" ) den += 1 num += 1 UpperCAmelCase__ : Optional[Any] = 10 return solutions def a__ ( lowerCAmelCase__ = 2 ) -> int: UpperCAmelCase__ : List[Any] = 1.0 for fraction in fraction_list(lowercase__ ): UpperCAmelCase__ : Any = Fraction(lowercase__ ) result *= frac.denominator / frac.numerator return int(lowercase__ ) if __name__ == "__main__": print(solution())
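Editor's check (ours): the four non-trivial digit-cancelling fractions the search above finds are 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so solution() returns 100.

from fractions import Fraction

product = Fraction(16, 64) * Fraction(19, 95) * Fraction(26, 65) * Fraction(49, 98)
assert product == Fraction(1, 100)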
181
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_mask __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_input_mask: __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _A (self ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForMaskedLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForQuestionAnswering(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForTokenClassification(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_choices __lowercase= DistilBertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= config_and_inputs __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Any =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) UpperCamelCase_ : Optional[int] =( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : str =True UpperCamelCase_ : str =True UpperCamelCase_ : Union[str, Any] =True UpperCamelCase_ : Optional[int] =True def _A (self ): __lowercase= DistilBertModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase ) def _A (self ): 
__lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase ) @slow def _A (self ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= DistilBertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __lowercase= True __lowercase= model_class(config=lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) __lowercase= torch.jit.trace( lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase , os.path.join(lowerCAmelCase , 'traced_model.pt' ) ) __lowercase= torch.jit.load(os.path.join(lowerCAmelCase , 'traced_model.pt' ) , map_location=lowerCAmelCase ) loaded(inputs_dict['input_ids'].to(lowerCAmelCase ) , inputs_dict['attention_mask'].to(lowerCAmelCase ) ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= DistilBertModel.from_pretrained('distilbert-base-uncased' ) __lowercase= torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0] __lowercase= torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , lowerCAmelCase ) __lowercase= torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
295
0
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def UpperCamelCase_ ( snake_case_ : Dict ) -> Tuple: '''simple docstring''' return getitem, k def UpperCamelCase_ ( snake_case_ : Optional[int] , snake_case_ : str ) -> int: '''simple docstring''' return setitem, k, v def UpperCamelCase_ ( snake_case_ : int ) -> List[str]: '''simple docstring''' return delitem, k def UpperCamelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , *snake_case_ : Any ) -> Tuple: '''simple docstring''' try: return fun(lowercase__ , *lowercase__ ), None except Exception as e: return None, e _A : Union[str, Any] = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) _A : Optional[int] = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] _A : Union[str, Any] = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] _A : Dict = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] _A : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] _A : Optional[int] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def UpperCamelCase_ ( snake_case_ : Optional[Any] ) -> List[Any]: '''simple docstring''' __lowerCAmelCase = HashMap(initial_block_size=4 ) __lowerCAmelCase = {} for _, (fun, *args) in enumerate(lowercase__ ): __lowerCAmelCase , __lowerCAmelCase = _run_operation(lowercase__ , lowercase__ , *lowercase__ ) __lowerCAmelCase , __lowerCAmelCase = _run_operation(lowercase__ , lowercase__ , *lowercase__ ) assert my_res == py_res assert str(lowercase__ ) == str(lowercase__ ) assert set(lowercase__ ) == set(lowercase__ ) assert len(lowercase__ ) == len(lowercase__ ) assert set(my.items() ) == set(py.items() ) def UpperCamelCase_ ( ) -> str: '''simple docstring''' def is_public(snake_case_ : Optional[Any] ) -> bool: return not name.startswith("""_""" ) __lowerCAmelCase = {name for name in dir({} ) if is_public(lowercase__ )} __lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(lowercase__ )} assert dict_public_names > hash_public_names
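Editor's standalone illustration (ours) of the replay pattern the tests above use: the same (fun, *args) operation tuples are applied to the map under test and to a plain dict, and the results and final states are compared.

from operator import getitem, setitem

ops = [(setitem, "k", 1), (getitem, "k")]
a, b = {}, {}  # stand-ins for HashMap and dict
results = [(fun(a, *args), fun(b, *args)) for fun, *args in ops]
assert a == b == {"k": 1} and results[1] == (1, 1)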
229
def bfs(graph, source, sink, parent) -> bool:
    """Breadth-first search that records an augmenting path in `parent`."""
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink) -> int:
    """Edmonds-Karp variant of Ford-Fulkerson: augment along BFS shortest paths."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the bottleneck capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
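A sanity check for the sketch above (my addition, not part of the dataset record): the capacity matrix is the classic CLRS flow network, whose widely quoted maximum flow from node 0 to node 5 is 23.

# Use a fresh capacity matrix: ford_fulkerson mutates its argument into the residual graph.
_capacities = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(_capacities, 0, 5) == 23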
295
0
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Compute the fixed monthly payment (EMI) on an amortized loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get the monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 because payments are monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
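A short usage sketch (my addition; the loan figures are hypothetical):

# 25,000 borrowed at 8% per annum, repaid monthly over 15 years.
emi = equated_monthly_installments(25_000, 0.08, 15)
print(f"Monthly installment: {emi:.2f}")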
104
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: True if `pattern` occurs in `text`."""
    # 1) Construct the failure (prefix) array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """failure[j]: length of the longest proper prefix of pattern[: j + 1] that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    texta = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    textb = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, texta) and not kmp(pattern, textb)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
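A brief usage sketch (my addition; the strings are made up):

# The failure array is what keeps the scan linear: O(len(text) + len(pattern)).
print(kmp("needle", "haystack with a needle in it"))  # True
print(kmp("needle", "plain haystack"))                # False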
295
0
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE_:int = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } SCREAMING_SNAKE_CASE_:Any = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class SCREAMING_SNAKE_CASE__ ( A_ ): '''simple docstring''' __lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES __lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Dict = ['''input_ids''', '''attention_mask'''] __lowerCamelCase : Optional[int] = RobertaTokenizer def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__="replace", lowerCamelCase__="<s>", lowerCamelCase__="</s>", lowerCamelCase__="</s>", lowerCamelCase__="<s>", lowerCamelCase__="<unk>", lowerCamelCase__="<pad>", lowerCamelCase__="<mask>", lowerCamelCase__=False, lowerCamelCase__=True, **lowerCamelCase__, ): super().__init__( lowerCamelCase__, lowerCamelCase__, tokenizer_file=lowerCamelCase__, 
errors=lowerCamelCase__, bos_token=lowerCamelCase__, eos_token=lowerCamelCase__, sep_token=lowerCamelCase__, cls_token=lowerCamelCase__, unk_token=lowerCamelCase__, pad_token=lowerCamelCase__, mask_token=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__, **lowerCamelCase__, ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""", lowerCamelCase__ ) != add_prefix_space: A : Optional[Any] = getattr(lowerCamelCase__, pre_tok_state.pop("""type""" ) ) A : List[str] = add_prefix_space A : List[Any] = pre_tok_class(**lowerCamelCase__ ) A : str = add_prefix_space A : Union[str, Any] = """post_processor""" A : Tuple = getattr(self.backend_tokenizer, lowerCamelCase__, lowerCamelCase__ ) if tokenizer_component_instance: A : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : List[str] = tuple(state["""sep"""] ) if "cls" in state: A : Tuple = tuple(state["""cls"""] ) A : Tuple = False if state.get("""add_prefix_space""", lowerCamelCase__ ) != add_prefix_space: A : Optional[int] = add_prefix_space A : int = True if state.get("""trim_offsets""", lowerCamelCase__ ) != trim_offsets: A : Dict = trim_offsets A : Optional[Any] = True if changes_to_apply: A : Optional[Any] = getattr(lowerCamelCase__, state.pop("""type""" ) ) A : List[Any] = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer, lowerCamelCase__, lowerCamelCase__ ) @property def _lowerCAmelCase ( self ): if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _lowerCAmelCase ( self, lowerCamelCase__ ): A : List[str] = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else value A : List[Any] = value def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ): A : Any = kwargs.get("""is_split_into_words""", lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ): A : List[Any] = kwargs.get("""is_split_into_words""", lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ): A : int = self._tokenizer.model.save(lowerCamelCase__, name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None ): A : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ): A : Any = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
116
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['''GPTNeoXTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXForCausalLM''', '''GPTNeoXForQuestionAnswering''', '''GPTNeoXForSequenceClassification''', '''GPTNeoXForTokenClassification''', '''GPTNeoXLayer''', '''GPTNeoXModel''', '''GPTNeoXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
295
0
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate y' = f(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: a plain forward-Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
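A quick worked check for the integrator above (my addition; the ODE and the tolerance are illustrative):

# Solve y' = y on [0, 1] with y(0) = 1; the final value should be close to e.
import math

approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
assert abs(approx[-1] - math.e) < 1e-3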
133
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A ( enum.Enum ): UpperCamelCase_ : Optional[int] =0 UpperCamelCase_ : Tuple =1 UpperCamelCase_ : Optional[int] =2 @add_end_docstrings(A_ ) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__(self , *lowerCAmelCase , **lowerCAmelCase ): super().__init__(*lowerCAmelCase , **lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowercase= None if self.model.config.prefix is not None: __lowercase= self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowercase= self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params ) __lowercase= {**self._preprocess_params, **preprocess_params} __lowercase= {**self._forward_params, **forward_params} def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ): __lowercase= {} if prefix is not None: __lowercase= prefix if prefix: __lowercase= self.tokenizer( lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected' ' [None, \'hole\']' ) __lowercase= handle_long_generation preprocess_params.update(lowerCAmelCase ) __lowercase= generate_kwargs __lowercase= {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.TENSORS if return_type is not None: __lowercase= return_type if clean_up_tokenization_spaces is not None: __lowercase= clean_up_tokenization_spaces if stop_sequence is not None: __lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) if len(lowerCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowercase= stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _A (self , *lowerCAmelCase , **lowerCAmelCase ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase ) def __call__(self , lowerCAmelCase , **lowerCAmelCase ): return super().__call__(lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ): __lowercase= self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prompt_text if handle_long_generation == "hole": __lowercase= inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowercase= generate_kwargs['max_new_tokens'] else: __lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowercase= self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowercase= inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowercase= inputs['attention_mask'][:, -keep_length:] return inputs def _A (self , lowerCAmelCase , **lowerCAmelCase ): __lowercase= model_inputs['input_ids'] __lowercase= model_inputs.get('attention_mask' , lowerCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: __lowercase= None __lowercase= None __lowercase= 1 else: __lowercase= input_ids.shape[0] __lowercase= model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowercase= generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowercase= 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowercase= 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase ) __lowercase= generated_sequence.shape[0] if self.framework == "pt": __lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ): __lowercase= model_outputs['generated_sequence'][0] __lowercase= model_outputs['input_ids'] __lowercase= model_outputs['prompt_text'] __lowercase= generated_sequence.numpy().tolist() __lowercase= [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowercase= {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowercase= self.tokenizer.decode( lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowercase= 0 else: __lowercase= len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) ) if return_type == ReturnType.FULL_TEXT: __lowercase= prompt_text + text[prompt_length:] else: __lowercase= text[prompt_length:] __lowercase= {'generated_text': all_text} records.append(lowerCAmelCase ) return records
295
0
'''simple docstring''' import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Optional[int] ) -> str: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A__ , A__ : str =FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) A__ : int ="""A painting of a squirrel eating a burger""" A__ : Tuple =jax.device_count() A__ : Optional[int] =num_samples * [prompt] A__ : Any =sd_pipe.prepare_inputs(lowerCAmelCase_ ) A__ : Any =replicate(lowerCAmelCase_ ) A__ : Optional[int] =shard(lowerCAmelCase_ ) A__ : List[Any] =jax.random.PRNGKey(0 ) A__ : Optional[int] =jax.random.split(lowerCAmelCase_ , jax.device_count() ) A__ : int =sd_pipe(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , num_inference_steps=25 , jit=lowerCAmelCase_ )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) A__ : Any =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A__ : Tuple =images[0, 2_53:2_56, 2_53:2_56, -1] A__ : int =jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ : Optional[int] =jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def lowercase__ ( self : List[Any] ) -> Dict: '''simple docstring''' A__ : str ="""stabilityai/stable-diffusion-2""" A__ , A__ : int =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase_ , subfolder="""scheduler""" ) A__ , A__ : Any =FlaxStableDiffusionPipeline.from_pretrained( lowerCAmelCase_ , scheduler=lowerCAmelCase_ , revision="""bf16""" , dtype=jnp.bfloataa , ) A__ : Any =scheduler_params A__ : Tuple ="""A painting of a squirrel eating a burger""" A__ : Dict =jax.device_count() A__ : Tuple =num_samples * [prompt] A__ : str =sd_pipe.prepare_inputs(lowerCAmelCase_ ) A__ : Union[str, Any] =replicate(lowerCAmelCase_ ) A__ : Tuple =shard(lowerCAmelCase_ ) A__ : Optional[Any] =jax.random.PRNGKey(0 ) A__ : Optional[Any] =jax.random.split(lowerCAmelCase_ , jax.device_count() ) A__ : List[Any] =sd_pipe(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , num_inference_steps=25 , jit=lowerCAmelCase_ )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) A__ : Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A__ : str =images[0, 2_53:2_56, 2_53:2_56, -1] A__ : Union[str, Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) ) A__ : Union[str, Any] =jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
134
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class A ( A_ ): UpperCamelCase_ : torch.FloatTensor class A ( A_ , A_ ): @register_to_config def __init__(self , lowerCAmelCase = 3 , lowerCAmelCase = 3 , lowerCAmelCase = ("DownEncoderBlock2D",) , lowerCAmelCase = ("UpDecoderBlock2D",) , lowerCAmelCase = (6_4,) , lowerCAmelCase = 1 , lowerCAmelCase = "silu" , lowerCAmelCase = 3 , lowerCAmelCase = 3_2 , lowerCAmelCase = 2_5_6 , lowerCAmelCase = 3_2 , lowerCAmelCase = None , lowerCAmelCase = 0.1_82_15 , lowerCAmelCase = "group" , ): super().__init__() # pass init params to Encoder __lowercase= Encoder( in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , down_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , double_z=lowerCAmelCase , ) __lowercase= vq_embed_dim if vq_embed_dim is not None else latent_channels __lowercase= nn.Convad(lowerCAmelCase , lowerCAmelCase , 1 ) __lowercase= VectorQuantizer(lowerCAmelCase , lowerCAmelCase , beta=0.25 , remap=lowerCAmelCase , sane_index_shape=lowerCAmelCase ) __lowercase= nn.Convad(lowerCAmelCase , lowerCAmelCase , 1 ) # pass init params to Decoder __lowercase= Decoder( in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , up_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , norm_type=lowerCAmelCase , ) @apply_forward_hook def _A (self , lowerCAmelCase , lowerCAmelCase = True ): __lowercase= self.encoder(lowerCAmelCase ) __lowercase= self.quant_conv(lowerCAmelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCAmelCase ) @apply_forward_hook def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ): # also go through quantization layer if not force_not_quantize: __lowercase, __lowercase, __lowercase= self.quantize(lowerCAmelCase ) else: __lowercase= h __lowercase= self.post_quant_conv(lowerCAmelCase ) __lowercase= self.decoder(lowerCAmelCase , quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = True ): __lowercase= sample __lowercase= self.encode(lowerCAmelCase ).latents __lowercase= self.decode(lowerCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase )
295
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: __UpperCamelCase : str = None __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase : Optional[Any] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } __UpperCamelCase : Dict = { '''albert-base-v1''': 5_1_2, '''albert-large-v1''': 5_1_2, '''albert-xlarge-v1''': 5_1_2, '''albert-xxlarge-v1''': 5_1_2, '''albert-base-v2''': 5_1_2, '''albert-large-v2''': 5_1_2, '''albert-xlarge-v2''': 5_1_2, '''albert-xxlarge-v2''': 5_1_2, } __UpperCamelCase : Optional[Any] = '''▁''' class SCREAMING_SNAKE_CASE ( A_ ): """simple docstring""" lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = AlbertTokenizer def __init__( self : Any ,lowercase_ : Any=None ,lowercase_ : Dict=None ,lowercase_ : Dict=True ,lowercase_ : int=True ,lowercase_ : Union[str, Any]=False ,lowercase_ : Optional[Any]="[CLS]" ,lowercase_ : Dict="[SEP]" ,lowercase_ : Dict="<unk>" ,lowercase_ : int="[SEP]" ,lowercase_ : str="<pad>" ,lowercase_ : int="[CLS]" ,lowercase_ : str="[MASK]" ,**lowercase_ : Union[str, Any] ,): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
lowerCAmelCase__ : Union[str, Any] = ( AddedToken(lowercase_ ,lstrip=lowercase_ ,rstrip=lowercase_ ,normalized=lowercase_ ) if isinstance(lowercase_ ,lowercase_ ) else mask_token ) super().__init__( lowercase_ ,tokenizer_file=lowercase_ ,do_lower_case=lowercase_ ,remove_space=lowercase_ ,keep_accents=lowercase_ ,bos_token=lowercase_ ,eos_token=lowercase_ ,unk_token=lowercase_ ,sep_token=lowercase_ ,pad_token=lowercase_ ,cls_token=lowercase_ ,mask_token=lowercase_ ,**lowercase_ ,) lowerCAmelCase__ : Tuple = do_lower_case lowerCAmelCase__ : Tuple = remove_space lowerCAmelCase__ : Optional[Any] = keep_accents lowerCAmelCase__ : Any = vocab_file lowerCAmelCase__ : Dict = False if not self.vocab_file else True def __lowerCAmelCase ( self : List[Any] ,lowercase_ : Tuple ,lowercase_ : str = None ): lowerCAmelCase__ : Tuple = [self.sep_token_id] lowerCAmelCase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : Tuple ,lowercase_ : List[str] = None ): lowerCAmelCase__ : Dict = [self.sep_token_id] lowerCAmelCase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : List[str] ,lowercase_ : List[str] ,lowercase_ : List[Any] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowercase_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase__ : str = os.path.join( lowercase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file ,lowercase_ ) return (out_vocab_file,)
106
import os import numpy import onnx def _lowerCamelCase( lowercase__ , lowercase__ ) -> Union[str, Any]: '''simple docstring''' __lowercase= a.name __lowercase= b.name __lowercase= '' __lowercase= '' __lowercase= a == b __lowercase= name_a __lowercase= name_b return res def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]: '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(lowercase__ , lowercase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , lowercase__ , lowercase__ ) _graph_replace_input_with(node_proto.attribute[1].g , lowercase__ , lowercase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> str: '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(lowercase__ , lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Any: '''simple docstring''' __lowercase= list(model.graph.initializer ) __lowercase= list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __lowercase= inits[i].name __lowercase= inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' __lowercase= os.path.dirname(lowercase__ ) __lowercase= os.path.basename(lowercase__ ) __lowercase= onnx.load(os.path.join(lowercase__ , lowercase__ ) ) __lowercase= list(model.graph.initializer ) __lowercase= set() __lowercase= {} __lowercase= [] __lowercase= 0 for i in range(len(lowercase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(lowercase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(lowercase__ ) dup_set.add(lowercase__ ) __lowercase= inits[j].data_type __lowercase= numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 1_1: mem_size *= 8 else: print('unexpected data type: ' , lowercase__ ) total_reduced_size += mem_size __lowercase= inits[i].name __lowercase= inits[j].name if name_i in dup_map: dup_map[name_i].append(lowercase__ ) else: __lowercase= [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' ) __lowercase= sorted(lowercase__ ) _remove_dup_initializers_from_model(lowercase__ , lowercase__ , lowercase__ ) __lowercase= 'optimized_' + model_file_name __lowercase= os.path.join(lowercase__ , lowercase__ ) onnx.save(lowercase__ , lowercase__ ) return new_model
295
0
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        # Insert in descending order so the list reads back ascending.
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
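A tiny usage sketch (my addition):

# Construction sorts the inputs, and merging two lists re-sorts the union.
print(SortedLinkedList([3, 1]))                                      # 1 -> 3
print(merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([2])))  # 1 -> 2 -> 3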
334
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    """Return True only if every custom (non-Python) file exists in the package tree."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--check_lib", action="store_true", help="Whether to check the build or the actual package."
    )
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
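A hypothetical invocation (my addition; the script name is a placeholder, not taken from the record):

# python check_build.py              # inspects build/lib/transformers after a build
# python check_build.py --check_lib  # inspects the installed transformers package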
295
0
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) __lowerCAmelCase = [ '''cross_validation.py''', '''gradient_accumulation.py''', '''local_sgd.py''', '''multi_process_metrics.py''', '''memory.py''', '''automatic_gradient_accumulation.py''', '''fsdp_with_peak_mem_tracking.py''', '''deepspeed_with_config_support.py''', '''megatron_lm_gpt_pretraining.py''', ] class __a ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None ) -> str: '''simple docstring''' lowercase__: Tuple = None lowercase__: Optional[Any] = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) lowercase__: int = os.path.abspath('examples' ) for item in os.listdir(lowerCAmelCase__ ): if item not in EXCLUDE_EXAMPLES: lowercase__: Optional[int] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) if os.path.isfile(lowerCAmelCase__ ) and ".py" in item_path: with self.subTest( tested_script=lowerCAmelCase__ , feature_script=lowerCAmelCase__ , tested_section='main()' if parser_only else 'training_function()' , ): lowercase__: Union[str, Any] = compare_against_test( os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__: List[str] = '\n'.join(lowerCAmelCase__ ) if special_strings is not None: for string in special_strings: lowercase__: Dict = diff.replace(lowerCAmelCase__ , '' ) self.assertEqual(lowerCAmelCase__ , '' ) def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' self.one_complete_example('complete_nlp_example.py' , lowerCAmelCase__ ) self.one_complete_example('complete_nlp_example.py' , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' lowercase__: List[str] = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) lowercase__: List[Any] = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) self.one_complete_example('complete_cv_example.py' , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class __a ( A_ ): __lowercase : str = False @classmethod def SCREAMING_SNAKE_CASE__ ( cls ) -> Optional[int]: '''simple docstring''' super().setUpClass() lowercase__: Optional[int] = tempfile.mkdtemp() lowercase__: int = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) lowercase__: Dict = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def SCREAMING_SNAKE_CASE__ ( cls ) -> List[str]: '''simple docstring''' super().tearDownClass() shutil.rmtree(cls._tmpdir ) def SCREAMING_SNAKE_CASE__ ( self ) -> Any: '''simple docstring''' 
lowercase__: Optional[int] = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' lowercase__: Optional[Any] = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split() lowercase__: int = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: Any = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split() lowercase__: Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase__ ) self.assertNotIn('epoch 0:' , lowerCAmelCase__ ) self.assertIn('epoch 1:' , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' lowercase__: int = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split() lowercase__: List[str] = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase__ ) if torch.cuda.is_available(): lowercase__: Any = torch.cuda.device_count() else: lowercase__: str = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , lowerCAmelCase__ ) self.assertIn('epoch 1:' , lowerCAmelCase__ ) else: self.assertIn('epoch 0:' , lowerCAmelCase__ ) self.assertIn('epoch 1:' , lowerCAmelCase__ ) @slow def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: Tuple = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): lowercase__: int = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase__ ) lowercase__: Optional[Any] = re.findall('({.+})' , lowerCAmelCase__ ) lowercase__: Any = [r for r in results if 'accuracy' in r][-1] lowercase__: List[Any] = ast.literal_eval(lowerCAmelCase__ ) self.assertGreaterEqual(results['accuracy'] , 0.7_5 ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' lowercase__: Tuple = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: lowercase__: List[str] = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , 'tracking' ) ) ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__: Optional[int] = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' lowercase__: Dict = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
196
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively insertion-sort the first `n` elements of `collection` in place."""
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Push collection[index] rightward until its neighborhood is in ascending order."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
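A non-interactive usage sketch (my addition):

nums = [5, 3, 8, 1]
rec_insertion_sort(nums, len(nums))
print(nums)  # -> [1, 3, 5, 8]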
295
0
'''simple docstring''' import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=4 , ): '''simple docstring''' UpperCamelCase__ :int = parent UpperCamelCase__ :int = batch_size UpperCamelCase__ :Union[str, Any] = seq_length UpperCamelCase__ :str = is_training UpperCamelCase__ :Union[str, Any] = use_attention_mask UpperCamelCase__ :Any = use_token_type_ids UpperCamelCase__ :Optional[int] = use_labels UpperCamelCase__ :List[Any] = vocab_size UpperCamelCase__ :Optional[Any] = hidden_size UpperCamelCase__ :Union[str, Any] = num_hidden_layers UpperCamelCase__ :Tuple = num_attention_heads UpperCamelCase__ :List[Any] = intermediate_size UpperCamelCase__ :Optional[Any] = hidden_act UpperCamelCase__ :Tuple = hidden_dropout_prob UpperCamelCase__ :Optional[int] = attention_probs_dropout_prob UpperCamelCase__ :Optional[Any] = max_position_embeddings UpperCamelCase__ :Optional[int] = type_vocab_size UpperCamelCase__ :List[str] = type_sequence_label_size UpperCamelCase__ :Any = initializer_range UpperCamelCase__ :str = num_choices def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :Union[str, Any] = None if self.use_attention_mask: UpperCamelCase__ :str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ :Dict = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCamelCase_ , ) return config, input_ids, attention_mask def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = self.prepare_config_and_inputs() UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = config_and_inputs UpperCamelCase__ :Any = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class lowercase ( A_ , unittest.TestCase ): """simple docstring""" _a = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase__ ( self ): '''simple 
docstring''' UpperCamelCase__ :List[str] = FlaxDistilBertModelTester(self ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCamelCase__ :Tuple = model_class_name.from_pretrained('''distilbert-base-uncased''' ) UpperCamelCase__ :Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase_ ) @require_flax class lowercase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) UpperCamelCase__ :Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) UpperCamelCase__ :str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCamelCase__ :Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0] UpperCamelCase__ :Any = (1, 11, 768) self.assertEqual(output.shape , UpperCamelCase_ ) UpperCamelCase__ :List[str] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCamelCase_ , atol=1e-4 ) )
97
def split(string: str, separator: str = " ") -> list:
    """
    Split `string` on `separator` without using str.split.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
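An edge case worth knowing about the helper above (my addition):

# Unlike str.split, a trailing separator drops the final empty field.
print(split("a,b,", separator=","))  # -> ['a', 'b']
print("a,b,".split(","))             # -> ['a', 'b', '']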
295
0
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise a PyTorch model from the json config
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
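A hypothetical invocation of the conversion script (my addition; the script name and all paths are placeholders):

# python convert_bigbird_tf_checkpoint.py \
#     --tf_checkpoint_path ./bigbird_ckpt \
#     --big_bird_config_file ./config.json \
#     --pytorch_dump_path ./pytorch_dump \
#     --is_trivia_qa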
140
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict: '''simple docstring''' return field(default_factory=lambda: default , metadata=lowercase__ ) @dataclass class A : UpperCamelCase_ : str =field( metadata={'''help''': '''The csv file to plot.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={ '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.''' } , ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , ) UpperCamelCase_ : Optional[List[str]] =list_field( default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: int(lowercase__ ) return True except ValueError: return False def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: float(lowercase__ ) return True except ValueError: return False class A : def __init__(self , lowerCAmelCase ): __lowercase= args __lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='' ) as csv_file: __lowercase= csv.DictReader(lowerCAmelCase ) for row in reader: __lowercase= row['model'] self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) ) self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) ) if can_convert_to_int(row['result'] ): # value is not None __lowercase= int(row['result'] ) elif can_convert_to_float(row['result'] ): # value is not None __lowercase= float(row['result'] ) def _A (self ): __lowercase, __lowercase= plt.subplots() __lowercase= 'Time usage' if self.args.is_time else 'Memory usage' __lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('log' ) ax.set_yscale('log' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): __lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) ) __lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) ) __lowercase= self.result_dict[model_name]['result'] ((__lowercase), (__lowercase))= ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) __lowercase= ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: __lowercase= np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , ) else: __lowercase= np.asarray( [results[(inner_loop_value, x)] for x in 
x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((__lowercase), (__lowercase))= ( ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz') ) __lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )] plt.scatter( lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' ) plt.plot(lowerCAmelCase , lowerCAmelCase , '--' ) title_str += f' {label_model_name} vs.' __lowercase= title_str[:-4] __lowercase= 'Time in s' if self.args.is_time else 'Memory in MB' # plot plt.title(lowerCAmelCase ) plt.xlabel(lowerCAmelCase ) plt.ylabel(lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def _lowerCamelCase( ) -> str: '''simple docstring''' __lowercase= HfArgumentParser(lowercase__ ) __lowercase= parser.parse_args_into_dataclasses()[0] __lowercase= Plot(args=lowercase__ ) plot.plot() if __name__ == "__main__": main()
295
0
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in meters between two (latitude, longitude) points."""
    # Account for the Earth's flattening by converting to reduced latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Haversine equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
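A usage sketch (my addition; the coordinates are Berlin and Paris, and the expected figure is only approximate given the spherical model above):

# Should print roughly 8.8e5 meters (~880 km) between the two cities.
print(haversine_distance(52.5200, 13.4050, 48.8566, 2.3522))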
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class A ( A_ ): UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int =DPRContextEncoderTokenizer class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer lowerCAmelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(A_ ) class A : def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) elif titles is None or texts is None: __lowercase= titles if texts is None else texts return super().__call__( lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles] __lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts] __lowercase= len(lowerCAmelCase ) __lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.' 
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
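# Usage sketch (an assumption about typical usage, not part of this module): the reader
# tokenizer batches one question against several passages, and `decode_best_spans`
# turns the reader logits back into ranked answer spans. Checkpoint names come from the
# pretrained maps above.
#
# from transformers import DPRReader, DPRReaderTokenizerFast
#
# tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
# model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded = tokenizer(
#     questions=["What is love?"],
#     titles=["Haddaway"],
#     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#     return_tensors="pt",
# )
# outputs = model(**encoded)
# best_spans = tokenizer.decode_best_spans(encoded, outputs)
# print(best_spans[0].text)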
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowercase ( A_ , unittest.TestCase ): '''simple docstring''' _SCREAMING_SNAKE_CASE : List[Any] = DebertaTokenizer _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : Optional[Any] = DebertaTokenizerFast def a ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """[UNK]""", ] __lowerCAmelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __lowerCAmelCase = {"""unk_token""": """[UNK]"""} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE__ ) ) def a ( self : Dict , **SCREAMING_SNAKE_CASE__ : Any ) -> int: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]: __lowerCAmelCase = """lower newer""" __lowerCAmelCase = """lower newer""" return input_text, output_text def a ( self : str ) -> List[str]: __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = """lower newer""" __lowerCAmelCase = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] __lowerCAmelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def a ( self : List[Any] ) -> Optional[int]: __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = tokenizer("""Hello""" , """World""" ) __lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["""token_type_ids"""] , SCREAMING_SNAKE_CASE__ ) @slow def a ( self : Any ) -> str: __lowerCAmelCase = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) __lowerCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = tokenizer.encode( """sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = 
tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def a ( self : List[str] ) -> Union[str, Any]: __lowerCAmelCase = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __lowerCAmelCase = tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) __lowerCAmelCase = [ """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""", """ALBERT incorporates two parameter reduction techniques""", """The first one is a factorized embedding parameterization. By decomposing the large vocabulary""" """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of""" """ vocabulary embedding.""", ] __lowerCAmelCase = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = [tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) for seq in encoding["""input_ids"""]] # fmt: off __lowerCAmelCase = { """input_ids""": [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], """token_type_ids""": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __lowerCAmelCase = [ """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""", """ALBERT incorporates two parameter reduction techniques""", """The first one is a factorized embedding parameterization. By decomposing the large vocabulary""" """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of""" """ vocabulary embedding.""", ] self.assertDictEqual(encoding.data , SCREAMING_SNAKE_CASE__ ) for expected, decoded in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
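# Usage sketch (an assumption, mirroring the slow tests above with the real checkpoint
# instead of the toy vocabulary written in setUp):
#
# from transformers import DebertaTokenizer
#
# tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
# enc = tokenizer("lower newer")
# print(enc["input_ids"])
# print(tokenizer.decode(enc["input_ids"], skip_special_tokens=True))  # "lower newer"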
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A ( nn.Module ): def __init__(self ): super().__init__() __lowercase= nn.Linear(3 , 4 ) __lowercase= nn.BatchNormad(4 ) __lowercase= nn.Linear(4 , 5 ) def _A (self , lowerCAmelCase ): return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) ) class A ( A_ ): def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): return (args[0] + 1,) + args[1:], kwargs class A ( A_ ): def _A (self , lowerCAmelCase , lowerCAmelCase ): return output + 1 class A ( unittest.TestCase ): def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(test_model._hf_hook , lowerCAmelCase ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase ) self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(x + 1 ) __lowercase= test_model(x + 2 ) __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PostForwardHook() 
add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) ) self.assertTrue(outputa.requires_grad ) __lowercase= True __lowercase= test_model(lowerCAmelCase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) ) __lowercase= torch.randn(2 , 3 ).to(0 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(0 ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload __lowercase= { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
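# Usage sketch (an assumption, condensing the tests above): hooks wrap a module's
# forward pass without subclassing it, and removing the hook restores the original
# forward. `AlignDevicesHook(io_same_device=True)` keeps outputs on the input's device.
#
# import torch
# from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module
#
# model = ModelForTest()
# add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
# out = model(torch.randn(2, 3))  # output lands on the same device as the input
# remove_hook_from_module(model)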
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker lowerCAmelCase__ = '''CompVis/stable-diffusion-v1-1''' lowerCAmelCase__ = '''CompVis/stable-diffusion-v1-2''' lowerCAmelCase__ = '''CompVis/stable-diffusion-v1-3''' lowerCAmelCase__ = '''CompVis/stable-diffusion-v1-4''' class lowercase_ (A_ ): """simple docstring""" def __init__( self : Union[str, Any] ,lowercase__ : Dict ,lowercase__ : Dict ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : Dict = True ,): super()._init_() __lowercase = StableDiffusionPipeline.from_pretrained(lowercase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(lowercase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(lowercase__ ) __lowercase = StableDiffusionPipeline( vae=lowercase__ ,text_encoder=lowercase__ ,tokenizer=lowercase__ ,unet=lowercase__ ,scheduler=lowercase__ ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,requires_safety_checker=lowercase__ ,) self.register_modules(pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea ) @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return {k: getattr(self ,lowercase__ ) for k in self.config.keys() if not k.startswith('''_''' )} def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Dict = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): self.enable_attention_slicing(lowercase__ ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Tuple = 5_1_2 ,lowercase__ : Optional[Any] = 5_1_2 ,lowercase__ : str = 5_0 ,lowercase__ : Any = 7.5 ,lowercase__ : Any = None ,lowercase__ : Union[str, Any] = 1 ,lowercase__ : Union[str, Any] = 0.0 ,lowercase__ : List[str] = None ,lowercase__ : str = None ,lowercase__ : Dict = "pil" ,lowercase__ : Any = True ,lowercase__ : Optional[int] = None ,lowercase__ : Dict = 1 ,**lowercase__ : Tuple ,): return self.pipea( prompt=lowercase__ ,height=lowercase__ ,width=lowercase__ ,num_inference_steps=lowercase__ ,guidance_scale=lowercase__ ,negative_prompt=lowercase__ ,num_images_per_prompt=lowercase__ ,eta=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,output_type=lowercase__ ,return_dict=lowercase__ ,callback=lowercase__ ,callback_steps=lowercase__ ,**lowercase__ ,) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] = 5_1_2 ,lowercase__ : Any = 5_1_2 ,lowercase__ : str = 5_0 ,lowercase__ : Any = 7.5 ,lowercase__ : Optional[int] = None ,lowercase__ : Any = 1 ,lowercase__ : List[Any] = 0.0 ,lowercase__ : str = None ,lowercase__ : List[Any] = None ,lowercase__ : str = "pil" ,lowercase__ : Optional[Any] = True ,lowercase__ : Union[str, Any] = None ,lowercase__ : List[str] = 1 ,**lowercase__ : Optional[Any] ,): return self.pipea( prompt=lowercase__ 
,height=lowercase__ ,width=lowercase__ ,num_inference_steps=lowercase__ ,guidance_scale=lowercase__ ,negative_prompt=lowercase__ ,num_images_per_prompt=lowercase__ ,eta=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,output_type=lowercase__ ,return_dict=lowercase__ ,callback=lowercase__ ,callback_steps=lowercase__ ,**lowercase__ ,) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Dict = 5_1_2 ,lowercase__ : List[Any] = 5_1_2 ,lowercase__ : List[str] = 5_0 ,lowercase__ : Optional[Any] = 7.5 ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[int] = 1 ,lowercase__ : Tuple = 0.0 ,lowercase__ : Union[str, Any] = None ,lowercase__ : Tuple = None ,lowercase__ : str = "pil" ,lowercase__ : Dict = True ,lowercase__ : Dict = None ,lowercase__ : Any = 1 ,**lowercase__ : List[str] ,): return self.pipea( prompt=lowercase__ ,height=lowercase__ ,width=lowercase__ ,num_inference_steps=lowercase__ ,guidance_scale=lowercase__ ,negative_prompt=lowercase__ ,num_images_per_prompt=lowercase__ ,eta=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,output_type=lowercase__ ,return_dict=lowercase__ ,callback=lowercase__ ,callback_steps=lowercase__ ,**lowercase__ ,) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ,lowercase__ : Optional[int] = 5_1_2 ,lowercase__ : Any = 5_1_2 ,lowercase__ : Optional[Any] = 5_0 ,lowercase__ : List[str] = 7.5 ,lowercase__ : List[Any] = None ,lowercase__ : int = 1 ,lowercase__ : List[str] = 0.0 ,lowercase__ : Any = None ,lowercase__ : Any = None ,lowercase__ : Any = "pil" ,lowercase__ : List[Any] = True ,lowercase__ : Optional[Any] = None ,lowercase__ : Optional[int] = 1 ,**lowercase__ : int ,): return self.pipea( prompt=lowercase__ ,height=lowercase__ ,width=lowercase__ ,num_inference_steps=lowercase__ ,guidance_scale=lowercase__ ,negative_prompt=lowercase__ ,num_images_per_prompt=lowercase__ ,eta=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,output_type=lowercase__ ,return_dict=lowercase__ ,callback=lowercase__ ,callback_steps=lowercase__ ,**lowercase__ ,) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Dict = 5_1_2 ,lowercase__ : Tuple = 5_1_2 ,lowercase__ : Dict = 5_0 ,lowercase__ : Tuple = 7.5 ,lowercase__ : str = None ,lowercase__ : Any = 1 ,lowercase__ : List[str] = 0.0 ,lowercase__ : Optional[Any] = None ,lowercase__ : Tuple = None ,lowercase__ : Optional[int] = "pil" ,lowercase__ : Dict = True ,lowercase__ : Optional[int] = None ,lowercase__ : Any = 1 ,**lowercase__ : List[Any] ,): __lowercase = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(lowercase__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." 
) # Get first result from Stable Diffusion Checkpoint v1.1 __lowercase = self.textaimg_sda_a( prompt=lowercase__ ,height=lowercase__ ,width=lowercase__ ,num_inference_steps=lowercase__ ,guidance_scale=lowercase__ ,negative_prompt=lowercase__ ,num_images_per_prompt=lowercase__ ,eta=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,output_type=lowercase__ ,return_dict=lowercase__ ,callback=lowercase__ ,callback_steps=lowercase__ ,**lowercase__ ,) # Get first result from Stable Diffusion Checkpoint v1.2 __lowercase = self.textaimg_sda_a( prompt=lowercase__ ,height=lowercase__ ,width=lowercase__ ,num_inference_steps=lowercase__ ,guidance_scale=lowercase__ ,negative_prompt=lowercase__ ,num_images_per_prompt=lowercase__ ,eta=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,output_type=lowercase__ ,return_dict=lowercase__ ,callback=lowercase__ ,callback_steps=lowercase__ ,**lowercase__ ,) # Get first result from Stable Diffusion Checkpoint v1.3 __lowercase = self.textaimg_sda_a( prompt=lowercase__ ,height=lowercase__ ,width=lowercase__ ,num_inference_steps=lowercase__ ,guidance_scale=lowercase__ ,negative_prompt=lowercase__ ,num_images_per_prompt=lowercase__ ,eta=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,output_type=lowercase__ ,return_dict=lowercase__ ,callback=lowercase__ ,callback_steps=lowercase__ ,**lowercase__ ,) # Get first result from Stable Diffusion Checkpoint v1.4 __lowercase = self.textaimg_sda_a( prompt=lowercase__ ,height=lowercase__ ,width=lowercase__ ,num_inference_steps=lowercase__ ,guidance_scale=lowercase__ ,negative_prompt=lowercase__ ,num_images_per_prompt=lowercase__ ,eta=lowercase__ ,generator=lowercase__ ,latents=lowercase__ ,output_type=lowercase__ ,return_dict=lowercase__ ,callback=lowercase__ ,callback_steps=lowercase__ ,**lowercase__ ,) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
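# Usage sketch (heavily hedged): `StableDiffusionComparisonPipeline` is the assumed
# de-mangled name of the class above. Its constructor takes the usual Stable Diffusion
# components, then internally loads the four v1.x checkpoints declared at the top of
# the file and runs the same prompt through each of them.
#
# from diffusers import StableDiffusionPipeline
#
# base = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
# pipe = StableDiffusionComparisonPipeline(  # hypothetical name for the class above
#     base.vae, base.text_encoder, base.tokenizer, base.unet,
#     base.scheduler, base.safety_checker, base.feature_extractor,
# )
# result = pipe(prompt="an astronaut riding a horse")
# images = result.images  # one image per checkpoint: v1.1, v1.2, v1.3, v1.4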
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class A ( unittest.TestCase ): def _A (self ): __lowercase= logging.get_logger() # the current default level is logging.WARNING __lowercase= logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) def _A (self ): __lowercase= logging.get_verbosity() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase ) __lowercase= logging.log_levels[env_level_str] __lowercase= logging.get_verbosity() self.assertEqual( lowerCAmelCase , lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level __lowercase= '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __lowercase= logging.logging.getLogger() with CaptureLogger(lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def _A (self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , '' ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
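# Usage sketch (an assumption, summarizing the knobs the tests above exercise):
#
# import transformers.utils.logging as hf_logging
#
# hf_logging.set_verbosity_info()      # programmatic control
# hf_logging.set_verbosity_warning()
# hf_logging.disable_progress_bar()    # also toggles tqdm progress bars
# hf_logging.enable_progress_bar()
#
# # or via the environment, before transformers is imported:
# #   TRANSFORMERS_VERBOSITY=error python my_script.py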
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    """Compute the Schur complement of the block A in [[A, B], [B.T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        m = np.block([[a, b], [b.T, c]])
        det_m = np.linalg.det(m)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_m, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0]])  # only 2 rows: does not match A
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
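# Usage sketch (an assumption, restating the identity the unit test checks): for the
# block matrix M = [[A, B], [B.T, C]], det(M) = det(A) * det(S), where S is the Schur
# complement of A in M.
#
# a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
# b = np.array([[0, 3], [3, 0], [2, 3]])
# c = np.array([[2, 1], [6, 3]])
# s = schur_complement(a, b, c)
# m = np.block([[a, b], [b.T, c]])
# assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))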
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase = '''▁''' lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''} } lowerCAmelCase = { '''google/pegasus-xsum''': 5_1_2, } lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=1_0_3 , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= offset if additional_special_tokens is not None: if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise TypeError( f'additional_special_tokens should be of type {type(lowerCAmelCase )}, but is' f' {type(lowerCAmelCase )}' ) __lowercase= ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'<unk_{i}>' for i in range(len(lowerCAmelCase ) , self.offset - 1 ) ] if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' 
) __lowercase= additional_special_tokens_extended else: __lowercase= [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )] __lowercase= {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) __lowercase= mask_token_sent __lowercase= vocab_file __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) # add special tokens to encoder dict __lowercase= { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) __lowercase= {v: k for k, v in self.encoder.items()} @property def _A (self ): return len(self.sp_model ) + self.offset def _A (self ): __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): __lowercase= self.__dict__.copy() __lowercase= None return state def __setstate__(self , lowerCAmelCase ): __lowercase= d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase= {} __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _A (self , lowerCAmelCase ): return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase ) def _A (self , lowerCAmelCase ): if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] __lowercase= self.sp_model.piece_to_id(lowerCAmelCase ) return sp_id + self.offset def _A (self , lowerCAmelCase ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: __lowercase= self.sp_model.IdToPiece(index - self.offset ) return token def _A (self , lowerCAmelCase ): __lowercase= [] __lowercase= '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token __lowercase= [] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def _A (self , lowerCAmelCase=False ): return 1 def _A (self , lowerCAmelCase ): __lowercase= set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): if already_has_special_tokens: return self._special_token_mask(lowerCAmelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A (self , lowerCAmelCase , lowerCAmelCase=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to 
process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase , 'wb' ) as fi: __lowercase= self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
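# Usage sketch (an assumption): a round-trip with the checkpoint named in the
# pretrained maps at the top of the file.
#
# from transformers import PegasusTokenizer
#
# tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
# batch = tokenizer(["PEGASUS was pretrained with gap-sentence generation."], return_tensors="pt")
# print(tokenizer.decode(batch["input_ids"][0]))  # the eos token </s> is appended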
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    # Placeholder for the real MidiProcessor: it only exists so that importing the
    # library without `note_seq` installed fails lazily, with a helpful message.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
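# Note (added): this is the standard dummy-object pattern for optional backends. When
# `note_seq` is missing, instantiating the placeholder raises an ImportError that
# points at the missing dependency instead of failing with an opaque error later.
#
# MidiProcessor()  # raises ImportError naming the missing note_seq backend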
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= self.vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= self.num_labels __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Tuple =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase_ : List[str] =( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= inputs_dict['labels'] __lowercase= inputs_dict['labels'] __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= OpenAIGPTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is __lowercase= [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
295
0
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because PyTorch Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load the base longformer model from the model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
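# A quick way to exercise the converter above without the CLI is to call it
# directly. This is a sketch only: the checkpoint and output paths below are
# hypothetical placeholders, and the model id follows the argparse help text.
convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model="longformer-base-4096",
    longformer_question_answering_ckpt_path="./checkpoints/longformer_qa.ckpt",  # hypothetical path
    pytorch_dump_folder_path="./longformer_qa_pytorch",  # hypothetical path
)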
134
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to isqrt(number)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are differences of consecutive cubes:
    (n + 1) ** 3 - n ** 3 = 3 * n**2 + 3 * n + 1, i.e. 7, 19, 37, 61, 91, ..."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # next difference of consecutive cubes
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
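# Sanity check for the snippet above (a sketch, assuming the restored names):
# the first differences of consecutive cubes are 7, 19, 37, 61, 91, 127, and
# all but 91 = 7 * 13 are prime.
candidates = [(n + 1) ** 3 - n**3 for n in range(1, 7)]
assert candidates == [7, 19, 37, 61, 91, 127]
assert [c for c in candidates if is_prime(c)] == [7, 19, 37, 61, 127]
assert solution(100) == 4  # cuban primes below 100: 7, 19, 37, 61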
295
0
"""simple docstring""" import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self : int ,lowercase_ : List[Any] ,lowercase_ : int=1_3 ,lowercase_ : Union[str, Any]=3_0 ,lowercase_ : Tuple=2 ,lowercase_ : str=3 ,lowercase_ : Dict=True ,lowercase_ : List[Any]=True ,lowercase_ : str=3_2 ,lowercase_ : Any=5 ,lowercase_ : Optional[int]=4 ,lowercase_ : Optional[Any]=3_7 ,lowercase_ : List[Any]="gelu" ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : int=0.1 ,lowercase_ : List[str]=1_0 ,lowercase_ : List[str]=0.02 ,): lowerCAmelCase__ : Any = parent lowerCAmelCase__ : int = batch_size lowerCAmelCase__ : Dict = image_size lowerCAmelCase__ : Optional[Any] = patch_size lowerCAmelCase__ : List[str] = num_channels lowerCAmelCase__ : Tuple = is_training lowerCAmelCase__ : int = use_labels lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : str = num_attention_heads lowerCAmelCase__ : str = intermediate_size lowerCAmelCase__ : Optional[int] = hidden_act lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCAmelCase__ : Tuple = type_sequence_label_size lowerCAmelCase__ : str = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase__ : Optional[Any] = (image_size // patch_size) ** 2 lowerCAmelCase__ : int = num_patches + 1 def __lowerCAmelCase ( self : str ): lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : int = ViTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowercase_ ,initializer_range=self.initializer_range ,) return config, pixel_values def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Dict ,lowercase_ : Union[str, Any] ): lowerCAmelCase__ : List[Any] = FlaxViTModel(config=lowercase_ ) lowerCAmelCase__ : Any = model(lowercase_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase__ : List[str] = (self.image_size, self.image_size) lowerCAmelCase__ : Any = (self.patch_size, self.patch_size) lowerCAmelCase__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) ) def __lowerCAmelCase ( self : Dict ,lowercase_ : Union[str, Any] ,lowercase_ : List[Any] ): lowerCAmelCase__ : Any = self.type_sequence_label_size lowerCAmelCase__ : int = FlaxViTForImageClassification(config=lowercase_ ) lowerCAmelCase__ : Optional[int] = model(lowercase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test 
greyscale images lowerCAmelCase__ : List[str] = 1 lowerCAmelCase__ : Optional[Any] = FlaxViTForImageClassification(lowercase_ ) lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ : Optional[int] = model(lowercase_ ) def __lowerCAmelCase ( self : Tuple ): lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) , ) : List[str] = config_and_inputs lowerCAmelCase__ : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class SCREAMING_SNAKE_CASE ( A_ , unittest.TestCase ): """simple docstring""" lowercase__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : int = FlaxViTModelTester(self ) lowerCAmelCase__ : Any = ConfigTester(self ,config_class=lowercase_ ,has_text_modality=lowercase_ ,hidden_size=3_7 ) def __lowerCAmelCase ( self : int ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self : str ): lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def __lowerCAmelCase ( self : Dict ): lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) def __lowerCAmelCase ( self : Union[str, Any] ): lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Optional[Any] = model_class(lowercase_ ) lowerCAmelCase__ : Optional[int] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Dict = [*signature.parameters.keys()] lowerCAmelCase__ : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,lowercase_ ) def __lowerCAmelCase ( self : str ): lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ : Union[str, Any] = self._prepare_for_class(lowercase_ ,lowercase_ ) lowerCAmelCase__ : Any = model_class(lowercase_ ) @jax.jit def model_jitted(lowercase_ : Optional[Any] ,**lowercase_ : Dict ): return model(pixel_values=lowercase_ ,**lowercase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase__ : Optional[Any] = model_jitted(**lowercase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase__ : List[Any] = model_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) ,len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ ,lowercase_ ): self.assertEqual(jitted_output.shape ,output.shape ) @slow def __lowerCAmelCase ( self : Optional[Any] ): for model_class_name in self.all_model_classes: lowerCAmelCase__ : Optional[Any] = model_class_name.from_pretrained('''google/vit-base-patch16-224''' ) lowerCAmelCase__ : List[str] = model(np.ones((1, 3, 2_2_4, 2_2_4) ) ) self.assertIsNotNone(lowercase_ )
106
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of n as an ascending list."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:  # i does not divide n, try the next candidate
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:  # whatever remains is itself a prime factor
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
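# Quick checks for the factorisation above (a sketch using the restored names):
# 360 = 2**3 * 3**2 * 5, and multiplying the factors back recovers n.
from math import prod

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prod(prime_factors(360)) == 360
assert prime_factors(97) == [97]  # a prime factors as itself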
295
0
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _lowerCamelCase ={ "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } _lowerCamelCase ={ "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } _lowerCamelCase ={ "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } _lowerCamelCase ={ "facebook/dpr-ctx_encoder-single-nq-base": 5_12, "facebook/dpr-ctx_encoder-multiset-base": 5_12, } _lowerCamelCase ={ "facebook/dpr-question_encoder-single-nq-base": 5_12, "facebook/dpr-question_encoder-multiset-base": 5_12, } _lowerCamelCase ={ "facebook/dpr-reader-single-nq-base": 5_12, "facebook/dpr-reader-multiset-base": 5_12, } _lowerCamelCase ={ "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } _lowerCamelCase ={ "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } _lowerCamelCase ={ "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class a_ ( A_ ): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase = DPRContextEncoderTokenizer class a_ ( A_ ): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES 
__UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase = DPRQuestionEncoderTokenizer _lowerCamelCase =collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) _lowerCamelCase =collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) _lowerCamelCase =R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(A_ ) class a_ : """simple docstring""" def __call__( self : Optional[int] ,snake_case : Any ,snake_case : str = None ,snake_case : Dict = None ,snake_case : Any = False ,snake_case : Union[str, Any] = False ,snake_case : str = None ,snake_case : Tuple = None ,snake_case : Tuple = None ,**snake_case : List[str] ,): if titles is None and texts is None: return super().__call__( snake_case ,padding=snake_case ,truncation=snake_case ,max_length=snake_case ,return_tensors=snake_case ,return_attention_mask=snake_case ,**snake_case ,) elif titles is None or texts is None: SCREAMING_SNAKE_CASE =titles if texts is None else texts return super().__call__( snake_case ,snake_case ,padding=snake_case ,truncation=snake_case ,max_length=snake_case ,return_tensors=snake_case ,return_attention_mask=snake_case ,**snake_case ,) SCREAMING_SNAKE_CASE =titles if not isinstance(snake_case ,snake_case ) else [titles] SCREAMING_SNAKE_CASE =texts if not isinstance(snake_case ,snake_case ) else [texts] SCREAMING_SNAKE_CASE =len(snake_case ) SCREAMING_SNAKE_CASE =questions if not isinstance(snake_case ,snake_case ) else [questions] * n_passages assert len(snake_case ) == len( snake_case ), f'There should be as many titles than texts but got {len(snake_case )} titles and {len(snake_case )} texts.' 
SCREAMING_SNAKE_CASE =super().__call__(snake_case ,snake_case ,padding=snake_case ,truncation=snake_case )['input_ids'] SCREAMING_SNAKE_CASE =super().__call__(snake_case ,add_special_tokens=snake_case ,padding=snake_case ,truncation=snake_case )['input_ids'] SCREAMING_SNAKE_CASE ={ 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(snake_case ,snake_case ) ] } if return_attention_mask is not False: SCREAMING_SNAKE_CASE =[] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) SCREAMING_SNAKE_CASE =attention_mask return self.pad(snake_case ,padding=snake_case ,max_length=snake_case ,return_tensors=snake_case ) def _lowerCAmelCase ( self : Optional[int] ,snake_case : int ,snake_case : str ,snake_case : Union[str, Any] = 16 ,snake_case : str = 64 ,snake_case : int = 4 ,): SCREAMING_SNAKE_CASE =reader_input['input_ids'] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =reader_output[:3] SCREAMING_SNAKE_CASE =len(snake_case ) SCREAMING_SNAKE_CASE =sorted(range(snake_case ) ,reverse=snake_case ,key=relevance_logits.__getitem__ ) SCREAMING_SNAKE_CASE =[] for doc_id in sorted_docs: SCREAMING_SNAKE_CASE =list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence SCREAMING_SNAKE_CASE =sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: SCREAMING_SNAKE_CASE =sequence_ids.index(self.pad_token_id ) else: SCREAMING_SNAKE_CASE =len(snake_case ) SCREAMING_SNAKE_CASE =self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=snake_case ,top_spans=snake_case ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=snake_case ,start_index=snake_case ,end_index=snake_case ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(snake_case ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any] ,snake_case : str ,snake_case : Optional[int] ,snake_case : Tuple ,): SCREAMING_SNAKE_CASE =[] for start_index, start_score in enumerate(snake_case ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) SCREAMING_SNAKE_CASE =sorted(snake_case ,key=lambda snake_case : x[1] ,reverse=snake_case ) SCREAMING_SNAKE_CASE =[] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' SCREAMING_SNAKE_CASE =end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(snake_case ) == top_spans: break return chosen_span_intervals 
@add_end_docstrings(A_ ) class a_ ( A_ , A_ ): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase = ['''input_ids''', '''attention_mask'''] __UpperCAmelCase = DPRReaderTokenizer
334
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase = None lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase = { '''t5-small''': 5_1_2, '''t5-base''': 5_1_2, '''t5-large''': 5_1_2, '''t5-3b''': 5_1_2, '''t5-11b''': 5_1_2, } class A ( A_ ): UpperCamelCase_ : Dict =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : List[str] =TaTokenizer UpperCamelCase_ : List[int] =[] def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=1_0_0 , lowerCAmelCase=None , **lowerCAmelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __lowercase= [f'<extra_id_{i}>' for i in range(lowerCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens __lowercase= len(set(filter(lambda lowerCAmelCase : bool('extra_id_' in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= vocab_file __lowercase= False if not self.vocab_file else True __lowercase= extra_ids @staticmethod def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: __lowercase= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase , ) return max_model_length def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ): copyfile(self.vocab_file , lowerCAmelCase ) logger.info(f'Copy vocab file to {out_vocab_file}' ) return (out_vocab_file,) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: __lowercase= token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _A (self ): return list( set(filter(lambda lowerCAmelCase : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def _A (self ): return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
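# For orientation: the class above mirrors transformers' T5TokenizerFast. A
# typical round trip with the real class is sketched below; network access to
# the t5-small checkpoint is assumed, and get_sentinel_tokens (defined above)
# may not exist in older transformers releases.
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
ids = tok("translate English to German: Hello world").input_ids
print(tok.convert_ids_to_tokens(ids))  # sentencepiece pieces, ending with </s>
print(tok.get_sentinel_tokens()[:3])  # <extra_id_*> tokens; order is not guaranteed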
295
0
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __a ( A_ ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> str: '''simple docstring''' lowercase__: str = parent lowercase__: List[str] = batch_size lowercase__: Tuple = seq_length lowercase__: int = is_training lowercase__: str = use_input_mask lowercase__: Optional[int] = use_token_type_ids lowercase__: Union[str, Any] = use_labels lowercase__: Tuple = vocab_size lowercase__: List[Any] = hidden_size lowercase__: List[Any] = num_hidden_layers lowercase__: List[str] = num_attention_heads lowercase__: Optional[int] = intermediate_size lowercase__: str = hidden_act lowercase__: Union[str, Any] = hidden_dropout_prob lowercase__: Tuple = attention_probs_dropout_prob lowercase__: Optional[Any] = max_position_embeddings lowercase__: Any = type_vocab_size lowercase__: str = type_sequence_label_size lowercase__: Tuple = initializer_range lowercase__: List[Any] = num_labels lowercase__: Dict = num_choices lowercase__: List[Any] = scope def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]: '''simple docstring''' lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: Optional[Any] = None if self.use_input_mask: lowercase__: Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: str = None lowercase__: Optional[Any] = None lowercase__: Tuple = None if self.use_labels: lowercase__: Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__: int = ids_tensor([self.batch_size] , self.num_choices ) lowercase__: Any = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' 
lowercase__: Any = DistilBertModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowercase__: int = model(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__: Union[str, Any] = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' lowercase__: Tuple = DistilBertForMaskedLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowercase__: Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' lowercase__: List[str] = DistilBertForQuestionAnswering(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowercase__: Union[str, Any] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' lowercase__: Optional[Any] = self.num_labels lowercase__: Tuple = DistilBertForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowercase__: Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: '''simple docstring''' lowercase__: int = self.num_labels lowercase__: Union[str, Any] = DistilBertForTokenClassification(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowercase__: Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' lowercase__: Union[str, Any] = self.num_choices lowercase__: List[Any] = DistilBertForMultipleChoice(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowercase__: Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Any = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' lowercase__: Tuple = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)): Tuple = 
config_and_inputs lowercase__: List[str] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __a ( A_ , A_ , unittest.TestCase ): __lowercase : Any = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) __lowercase : Optional[int] = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) __lowercase : str = True __lowercase : str = True __lowercase : Union[str, Any] = True __lowercase : Optional[int] = True def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' lowercase__: int = DistilBertModelTester(self ) lowercase__: Dict = ConfigTester(self , config_class=lowerCAmelCase__ , dim=37 ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]: '''simple docstring''' lowercase__: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase__ ) @slow def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__: Union[str, Any] = DistilBertModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' lowercase__ , lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return lowercase__: Any = True lowercase__: List[str] = model_class(config=lowerCAmelCase__ ) lowercase__: Any = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__: Optional[int] = torch.jit.trace( lowerCAmelCase__ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , 'traced_model.pt' ) ) lowercase__: Any = torch.jit.load(os.path.join(lowerCAmelCase__ , 'traced_model.pt' ) , map_location=lowerCAmelCase__ ) loaded(inputs_dict['input_ids'].to(lowerCAmelCase__ ) , inputs_dict['attention_mask'].to(lowerCAmelCase__ ) ) @require_torch class __a ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' lowercase__: Optional[int] = DistilBertModel.from_pretrained('distilbert-base-uncased' ) lowercase__: Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) lowercase__: Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase__: Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0] lowercase__: Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , lowerCAmelCase__ ) lowercase__: Optional[Any] = torch.tensor( [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1E-4 ) )
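# Outside the test harness, the integration check at the end of this file boils
# down to a plain forward pass; a minimal sketch with the public API (checkpoint
# download assumed available):
import torch
from transformers import DistilBertModel, DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = DistilBertModel.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**inputs).last_hidden_state
print(last_hidden.shape)  # torch.Size([1, sequence_length, 768])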
196
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all contiguous subarrays of arr."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # extend the running subarray or restart it at the current element
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
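# Two quick checks for the restored Kadane implementation: the classic example,
# and how the empty-subarray flag changes the answer on all-negative input.
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
assert max_subarray_sum(nums) == 6  # subarray [4, -1, 2, 1]

assert max_subarray_sum([-5, -2, -3]) == -2  # best non-empty subarray
assert max_subarray_sum([-5, -2, -3], allow_empty_subarrays=True) == 0  # empty subarray wins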
295
0
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch __snake_case = logging.get_logger(__name__) @dataclass class lowercase : """simple docstring""" def __init__( self , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=6.0 , UpperCamelCase_=None , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=None , UpperCamelCase_="fp4" , UpperCamelCase_=False , **UpperCamelCase_ , ): '''simple docstring''' UpperCamelCase__ :List[str] = load_in_abit UpperCamelCase__ :Any = load_in_abit UpperCamelCase__ :Optional[int] = llm_inta_threshold UpperCamelCase__ :Optional[Any] = llm_inta_skip_modules UpperCamelCase__ :List[Any] = llm_inta_enable_fpaa_cpu_offload UpperCamelCase__ :Optional[Any] = llm_inta_has_fpaa_weight UpperCamelCase__ :Dict = bnb_abit_quant_type UpperCamelCase__ :int = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: UpperCamelCase__ :Union[str, Any] = torch.floataa elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase__ :Union[str, Any] = getattr(UpperCamelCase_ , UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , torch.dtype ): UpperCamelCase__ :Any = bnb_abit_compute_dtype else: raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' ) self.post_init() def lowerCAmelCase__ ( self ): '''simple docstring''' if not isinstance(self.llm_inta_threshold , UpperCamelCase_ ): raise ValueError('''llm_int8_threshold must be a float''' ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , UpperCamelCase_ ): raise ValueError('''llm_int8_skip_modules must be a list of strings''' ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , UpperCamelCase_ ): raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' ) if not isinstance(self.llm_inta_has_fpaa_weight , UpperCamelCase_ ): raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' ) if not isinstance(self.bnb_abit_quant_type , UpperCamelCase_ ): raise ValueError('''bnb_4bit_quant_type must be a string''' ) if not isinstance(self.bnb_abit_use_double_quant , UpperCamelCase_ ): raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' ) if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse( '''0.39.0''' ): raise ValueError( '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' ) def lowerCAmelCase__ ( self ): '''simple docstring''' return self.load_in_abit or self.load_in_abit def lowerCAmelCase__ ( self ): '''simple 
docstring''' if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def lowerCAmelCase__ ( cls , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :Optional[Any] = cls(**UpperCamelCase_ ) UpperCamelCase__ :int = [] for key, value in kwargs.items(): if hasattr(UpperCamelCase_ , UpperCamelCase_ ): setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) to_remove.append(UpperCamelCase_ ) for key in to_remove: kwargs.pop(UpperCamelCase_ , UpperCamelCase_ ) if return_unused_kwargs: return config, kwargs else: return config def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: UpperCamelCase__ :Dict = self.to_dict() UpperCamelCase__ :Any = json.dumps(UpperCamelCase_ , indent=2 , sort_keys=UpperCamelCase_ ) + '''\n''' writer.write(UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Optional[Any] = copy.deepcopy(self.__dict__ ) UpperCamelCase__ :Dict = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1] return output def __repr__( self ): '''simple docstring''' return F'''{self.__class__.__name__} {self.to_json_string()}''' def lowerCAmelCase__ ( self , UpperCamelCase_ = True ): '''simple docstring''' if use_diff is True: UpperCamelCase__ :Any = self.to_diff_dict() else: UpperCamelCase__ :Optional[int] = self.to_dict() return json.dumps(UpperCamelCase_ , indent=2 , sort_keys=UpperCamelCase_ ) + "\n" def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Any = self.to_dict() # get the default config dict UpperCamelCase__ :Optional[Any] = BitsAndBytesConfig().to_dict() UpperCamelCase__ :int = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: UpperCamelCase__ :Union[str, Any] = value return serializable_config_dict
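# The obfuscated dataclass above tracks transformers' BitsAndBytesConfig. A
# typical 4-bit setup with the real class is sketched below; bitsandbytes
# >= 0.39.0 and a CUDA device are assumed, and the model id is illustrative only.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quant_config)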
97
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Any =PriorTransformer UpperCamelCase_ : List[str] ='''hidden_states''' @property def _A (self ): __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def _A (self ): return (4, 8) @property def _A (self ): return (4, 8) def _A (self ): __lowercase= { 'num_attention_heads': 2, 'attention_head_dim': 4, 'num_layers': 2, 'embedding_dim': 8, 'num_embeddings': 7, 'additional_embeddings': 4, } __lowercase= self.dummy_input return init_dict, inputs_dict def _A (self ): __lowercase, __lowercase= PriorTransformer.from_pretrained( 'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(lowerCAmelCase ) __lowercase= model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def _A (self ): __lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common() __lowercase= self.model_class(**lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['hidden_states', 'timestep'] self.assertListEqual(arg_names[:2] , lowerCAmelCase ) def _A (self ): __lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' ) __lowercase= model.to(lowerCAmelCase ) if hasattr(lowerCAmelCase , 'set_default_attn_processor' ): model.set_default_attn_processor() __lowercase= self.get_dummy_seed_input() with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] __lowercase= output[0, :5].flatten().cpu() print(lowerCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] ) self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) ) @slow class A ( unittest.TestCase ): def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= batch_size __lowercase= embedding_dim __lowercase= num_embeddings __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]], [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]], # fmt: on ] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' ) model.to(lowerCAmelCase ) __lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase ) with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] assert list(sample.shape ) == [1, 7_6_8] __lowercase= sample[0, :8].flatten().cpu() print(lowerCAmelCase ) __lowercase= torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
295
0
from math import pi, sqrt


def gamma(num: float) -> float:
    """Gamma function for positive integers and positive half-integers only."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    while True:
        num = float(input("Gamma of: "))
        if num == 0:  # exit cleanly instead of letting gamma(0) raise ValueError
            break
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
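# Beyond the built-in asserts, two identities make good spot checks for the
# fixed gamma above: gamma(3) = 2! = 2 and gamma(1.5) = sqrt(pi) / 2.
from math import isclose, pi, sqrt

assert gamma(3) == 2.0
assert isclose(gamma(1.5), sqrt(pi) / 2)
assert isclose(gamma(2.5), 3 * sqrt(pi) / 4)  # (3/2) * (1/2) * sqrt(pi)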
140
def counting_sort(collection): """Stable counting sort; returns a new sorted list.""" if collection == []: return [] # get some information about the collection coll_len = len(collection) coll_max = max(collection) coll_min = min(collection) # create the counting array counting_arr_length = coll_max + 1 - coll_min counting_arr = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1, counting_arr_length): counting_arr[i] = counting_arr[i] + counting_arr[i - 1] # create the output collection ordered = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0, coll_len)): ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def counting_sort_string(string): """Sort the characters of a string with counting sort.""" return "".join([chr(i) for i in counting_sort([ord(c) for c in string])]) if __name__ == "__main__": # Test string sort assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt" user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] print(counting_sort(unsorted))
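A brief usage sketch (illustrative, assuming the `counting_sort` names restored above); negative inputs work because the algorithm offsets every value by the minimum. # Illustrative usage of counting_sort(); not part of the original row. assert counting_sort([4, -2, 0, 4, 1]) == [-2, 0, 1, 4, 4] # stable, handles negatives assert counting_sort_string("bca") == "abc"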
295
0
from __future__ import annotations def prime_sieve(limit: int) -> list[int]: """Sieve of Eratosthenes: return all primes below `limit`.""" is_prime = [True] * limit is_prime[0] = False is_prime[1] = False is_prime[2] = True for i in range(3, int(limit**0.5 + 1), 2): index = i * 2 while index < limit: is_prime[index] = False index = index + i primes = [2] for i in range(3, limit, 2): if is_prime[i]: primes.append(i) return primes def solution(ceiling: int = 1_00_00_00) -> int: """Project Euler 50: the prime below `ceiling` that is the sum of the longest run of consecutive primes.""" primes = prime_sieve(ceiling) length = 0 largest = 0 for i in range(len(primes)): for j in range(i + length, len(primes)): sol = sum(primes[i:j]) if sol >= ceiling: break if sol in primes: length = j - i largest = sol return largest if __name__ == "__main__": print(f"{solution() = }")
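A small-scale check (illustrative, assuming the restored `solution` name): below 100 the longest run of consecutive primes with a prime sum is 2 + 3 + 5 + 7 + 11 + 13 = 41. # Illustrative small-ceiling check for solution(); not part of the original row. assert solution(100) == 41 # 2 + 3 + 5 + 7 + 11 + 13 = 41, a run of length 6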
181
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class DistilBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_distilbert_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DistilBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_distilbert_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DistilBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_distilbert_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DistilBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_distilbert_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DistilBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_distilbert_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DistilBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_distilbert_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = DistilBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) pipeline_model_mapping = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = True def setUp(self): self.model_tester = DistilBertModelTester(self) self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37) def test_config(self): self.config_tester.run_common_tests() def test_distilbert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DistilBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_gpu def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt")) loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) @require_torch class DistilBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = DistilBertModel.from_pretrained("distilbert-base-uncased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
295
0
from math import isqrt def is_prime(number: int) -> bool: """Trial division up to the integer square root.""" return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1)) def solution(max_prime: int = 10**6) -> int: """Count primes of the form (n + 1)^3 - n^3 below `max_prime`.""" primes_count = 0 cube_index = 1 prime_candidate = 7 while prime_candidate < max_prime: primes_count += is_prime(prime_candidate) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(f"{solution() = }")
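An illustrative spot check (assuming the restored `solution` name): the generated candidates are (n + 1)^3 - n^3 = 7, 19, 37, 61, 91, ..., and below 100 only the first four are prime. # Illustrative check; not part of the original row. Candidates below 100 are # 7, 19, 37, 61 and 91; all but 91 (= 7 * 13) are prime. assert solution(100) == 4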
229
def bfs(graph, source, sink, parent): """Breadth-first search for an augmenting path; records it in `parent`.""" visited = [False] * len(graph) queue = [] queue.append(source) visited[source] = True while queue: u = queue.pop(0) for ind in range(len(graph[u])): if visited[ind] is False and graph[u][ind] > 0: queue.append(ind) visited[ind] = True parent[ind] = u return visited[sink] def ford_fulkerson(graph, source, sink): """Ford-Fulkerson with BFS augmenting paths (the Edmonds-Karp variant).""" parent = [-1] * (len(graph)) max_flow = 0 while bfs(graph, source, sink, parent): path_flow = float("Inf") s = sink while s != source: # Find the minimum value in select path path_flow = min(path_flow, graph[parent[s]][s]) s = parent[s] max_flow += path_flow v = sink while v != source: u = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow v = parent[v] return max_flow graph = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] source, sink = 0, 5 print(ford_fulkerson(graph, source, sink))
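A minimal extra check (illustrative, using the restored names above): a single capacity-3 bottleneck edge limits the whole flow, so the function should return 3. # Illustrative mini-check; not part of the original row. Note the residual # graph is mutated in place by ford_fulkerson(). tiny_graph = [ [0, 5, 0], [0, 0, 3], # edge 1 -> 2 is the bottleneck [0, 0, 0], ] assert ford_fulkerson(tiny_graph, 0, 2) == 3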
295
0
from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def test_prim_successful_result(): """Check Prim's algorithm on a small weighted graph.""" num_nodes, num_edges = 9, 14 # noqa: F841 edges = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] adjancency = defaultdict(list) for nodea, nodeb, cost in edges: adjancency[nodea].append([nodeb, cost]) adjancency[nodeb].append([nodea, cost]) result = mst(adjancency) expected = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: edge = tuple(answer[:2]) reverse = tuple(edge[::-1]) assert edge in result or reverse in result
104
from __future__ import annotations def kmp(pattern: str, text: str) -> bool: """Knuth-Morris-Pratt substring search.""" # 1) Construct the failure array failure = get_failure_array(pattern) # 2) Step through text searching for pattern i, j = 0, 0 # index into text, pattern while i < len(text): if pattern[j] == text[i]: if j == (len(pattern) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: j = failure[j - 1] continue i += 1 return False def get_failure_array(pattern: str) -> list[int]: """For each prefix of `pattern`, the length of the longest proper prefix that is also a suffix.""" failure = [0] i = 0 j = 1 while j < len(pattern): if pattern[i] == pattern[j]: i += 1 elif i > 0: i = failure[i - 1] continue j += 1 failure.append(i) return failure if __name__ == "__main__": # Test 1) pattern = "abc1abc12" text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" text2 = "alskfjaldsk23adsfabcabc" assert kmp(pattern, text1) and not kmp(pattern, text2) # Test 2) pattern = "ABABX" text = "ABABZABABYABABX" assert kmp(pattern, text) # Test 3) pattern = "AAAB" text = "ABAAAAAB" assert kmp(pattern, text) # Test 4) pattern = "abcdabcy" text = "abcxabcdabxabcdabcdabcy" assert kmp(pattern, text) # Test 5) pattern = "aabaabaaa" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
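One more worked example (illustrative, using the restored names above): for "ababcab" the prefix "ab" is the longest proper prefix that is also a suffix, so the final failure value is 2. # Illustrative extra example; not part of the original row. assert get_failure_array("ababcab") == [0, 0, 1, 2, 0, 1, 2] assert kmp("cab", "ababcab") and not kmp("cba", "ababcab")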
295
0
import os import numpy import onnx def _is_equal_tensor_proto(a, b): """Compare two TensorProtos while ignoring their names.""" name_a = a.name name_b = b.name a.name = "" b.name = "" res = a == b a.name = name_a b.name = name_b return res def _node_replace_input_with(node_proto, name, new_name): for i, input_name in enumerate(node_proto.input): if input_name == name: node_proto.input.insert(i, new_name) node_proto.input.pop(i + 1) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g, name, new_name) _graph_replace_input_with(node_proto.attribute[1].g, name, new_name) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g, name, new_name) def _graph_replace_input_with(graph_proto, name, new_name): for n in graph_proto.node: _node_replace_input_with(n, name, new_name) def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace): inits_with_data = list(model.graph.initializer) inits = list(model_without_ext.graph.initializer) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i name_i = inits[i].name name_ref = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i]) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph, name_i, name_ref) def remove_dup_initializers(onnx_file_path): """Remove duplicate initializers from an ONNX model and save the result.""" model_file_folder = os.path.dirname(onnx_file_path) model_file_name = os.path.basename(onnx_file_path) model = onnx.load(os.path.join(model_file_folder, model_file_name)) inits = list(model.graph.initializer) dup_set = set() dup_map = {} ind_to_replace = [] total_reduced_size = 0 for i in range(len(inits)): if i in dup_set: continue for j in range(i + 1, len(inits)): if j in dup_set: continue if _is_equal_tensor_proto(inits[i], inits[j]): dup_set.add(i) dup_set.add(j) dtype = inits[j].data_type mem_size = numpy.prod(inits[j].dims) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: ", dtype) total_reduced_size += mem_size name_i = inits[i].name name_j = inits[j].name if name_i in dup_map: dup_map[name_i].append(name_j) else: dup_map[name_i] = [name_j] ind_to_replace.append((j, i)) print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB") ind_to_replace = sorted(ind_to_replace) _remove_dup_initializers_from_model(model, model, ind_to_replace) optimized_model_file_name = "optimized_" + model_file_name new_model = os.path.join(model_file_folder, optimized_model_file_name) onnx.save(model, new_model) return new_model
116
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable _import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gpt_neox"] = [ "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoXForCausalLM", "GPTNeoXForQuestionAnswering", "GPTNeoXForSequenceClassification", "GPTNeoXForTokenClassification", "GPTNeoXLayer", "GPTNeoXModel", "GPTNeoXPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
295
0
def split(string: str, separator: str = " ") -> list: """Split `string` on a single-character `separator` (a small str.split clone).""" split_words = [] last_index = 0 for index, char in enumerate(string): if char == separator: split_words.append(string[last_index:index]) last_index = index + 1 elif index + 1 == len(string): split_words.append(string[last_index : index + 1]) return split_words if __name__ == "__main__": from doctest import testmod testmod()
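A quick usage sketch (illustrative, using the restored `split` name above): the function mirrors str.split for single-character separators. # Illustrative usage of split(); not part of the original row. assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"] assert split("Hello there") == ["Hello", "there"]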
133
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A ( enum.Enum ): UpperCamelCase_ : Optional[int] =0 UpperCamelCase_ : Tuple =1 UpperCamelCase_ : Optional[int] =2 @add_end_docstrings(A_ ) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__(self , *lowerCAmelCase , **lowerCAmelCase ): super().__init__(*lowerCAmelCase , **lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowercase= None if self.model.config.prefix is not None: __lowercase= self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowercase= self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params ) __lowercase= {**self._preprocess_params, **preprocess_params} __lowercase= {**self._forward_params, **forward_params} def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ): __lowercase= {} if prefix is not None: __lowercase= prefix if prefix: __lowercase= self.tokenizer( lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected' ' [None, \'hole\']' ) __lowercase= handle_long_generation preprocess_params.update(lowerCAmelCase ) __lowercase= generate_kwargs __lowercase= {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.TENSORS if return_type is not None: __lowercase= return_type if clean_up_tokenization_spaces is not None: __lowercase= clean_up_tokenization_spaces if stop_sequence is not None: __lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) if len(lowerCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowercase= stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _A (self , *lowerCAmelCase , **lowerCAmelCase ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase ) def __call__(self , lowerCAmelCase , **lowerCAmelCase ): return super().__call__(lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ): __lowercase= self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prompt_text if handle_long_generation == "hole": __lowercase= inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowercase= generate_kwargs['max_new_tokens'] else: __lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowercase= self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowercase= inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowercase= inputs['attention_mask'][:, -keep_length:] return inputs def _A (self , lowerCAmelCase , **lowerCAmelCase ): __lowercase= model_inputs['input_ids'] __lowercase= model_inputs.get('attention_mask' , lowerCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: __lowercase= None __lowercase= None __lowercase= 1 else: __lowercase= input_ids.shape[0] __lowercase= model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowercase= generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowercase= 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowercase= 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase ) __lowercase= generated_sequence.shape[0] if self.framework == "pt": __lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ): __lowercase= model_outputs['generated_sequence'][0] __lowercase= model_outputs['input_ids'] __lowercase= model_outputs['prompt_text'] __lowercase= generated_sequence.numpy().tolist() __lowercase= [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowercase= {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowercase= self.tokenizer.decode( lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowercase= 0 else: __lowercase= len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) ) if return_type == ReturnType.FULL_TEXT: __lowercase= prompt_text + text[prompt_length:] else: __lowercase= text[prompt_length:] __lowercase= {'generated_text': all_text} records.append(lowerCAmelCase ) return records
295
0
from typing import Any class Node: def __init__(self, data: Any): self.data = data self.next = None def __repr__(self) -> str: return f"Node({self.data})" class LinkedList: def __init__(self): self.head = None def __iter__(self) -> Any: node = self.head while node: yield node.data node = node.next def __len__(self) -> int: return sum(1 for _ in self) def __repr__(self) -> str: return "->".join([str(item) for item in self]) def __getitem__(self, index: int) -> Any: if not 0 <= index < len(self): raise ValueError("list index out of range.") for i, node in enumerate(self): if i == index: return node return None def __setitem__(self, index: int, data: Any) -> None: if not 0 <= index < len(self): raise ValueError("list index out of range.") current = self.head for _ in range(index): current = current.next current.data = data def insert_tail(self, data: Any) -> None: self.insert_nth(len(self), data) def insert_head(self, data: Any) -> None: self.insert_nth(0, data) def insert_nth(self, index: int, data: Any) -> None: if not 0 <= index <= len(self): raise IndexError("list index out of range") new_node = Node(data) if self.head is None: self.head = new_node elif index == 0: new_node.next = self.head # link new_node to head self.head = new_node else: temp = self.head for _ in range(index - 1): temp = temp.next new_node.next = temp.next temp.next = new_node def print_list(self) -> None: # print every node data print(self) def delete_head(self) -> Any: return self.delete_nth(0) def delete_tail(self) -> Any: # delete from tail return self.delete_nth(len(self) - 1) def delete_nth(self, index: int = 0) -> Any: if not 0 <= index <= len(self) - 1: # test if index is valid raise IndexError("List index out of range.") delete_node = self.head # default first node if index == 0: self.head = self.head.next else: temp = self.head for _ in range(index - 1): temp = temp.next delete_node = temp.next temp.next = temp.next.next return delete_node.data def is_empty(self) -> bool: return self.head is None def reverse(self) -> None: prev = None current = self.head while current: # Store the current node's next node. next_node = current.next # Make the current node's next point backwards current.next = prev # Make the previous node be the current node prev = current # Make the current node the next node (to progress iteration) current = next_node # Return prev in order to put the head at the end self.head = prev def test_singly_linked_list() -> None: linked_list = LinkedList() assert linked_list.is_empty() is True assert str(linked_list) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10): assert len(linked_list) == i linked_list.insert_nth(i, i + 1) assert str(linked_list) == "->".join(str(i) for i in range(1, 11)) linked_list.insert_head(0) linked_list.insert_tail(11) assert str(linked_list) == "->".join(str(i) for i in range(0, 12)) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9) == 10 assert linked_list.delete_tail() == 11 assert len(linked_list) == 9 assert str(linked_list) == "->".join(str(i) for i in range(1, 10)) assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True for i in range(0, 9): linked_list[i] = -i assert all(linked_list[i] == -i for i in range(0, 9)) is True linked_list.reverse() assert str(linked_list) == "->".join(str(i) for i in range(-8, 1)) def test_singly_linked_list_2() -> None: test_input = [ -9, 100, Node(77_345_112), "dlrow olleH", 7, 5555, 0, -192.55555, "Hello, world!", 77.9, Node(10), None, None, 12.20, ] linked_list = LinkedList() for i in test_input: linked_list.insert_tail(i) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head result = linked_list.delete_head() assert result == -9 assert ( str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail result = linked_list.delete_tail() assert result == 12.2 assert ( str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list result = linked_list.delete_nth(10) assert result is None assert ( str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!")) assert ( str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(None) assert ( str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(linked_list) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def main(): from doctest import testmod testmod() linked_list = LinkedList() linked_list.insert_head(input("Inserting 1st at head ").strip()) linked_list.insert_head(input("Inserting 2nd at head ").strip()) print("\nPrint list:") linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail ").strip()) linked_list.insert_tail(input("Inserting 2nd at tail ").strip()) print("\nPrint list:") linked_list.print_list() print("\nDelete head") linked_list.delete_head() print("Delete tail") linked_list.delete_tail() print("\nPrint list:") linked_list.print_list() print("\nReverse linked list") linked_list.reverse() print("\nPrint list:") linked_list.print_list() print("\nString representation of linked list:") print(linked_list) print("\nReading/changing Node data using indexing:") print(f"Element at Position 1: {linked_list[1]}") linked_list[1] = input("Enter New Value: ").strip() print("New list:") print(linked_list) print(f"length of linked_list is : {len(linked_list)}") if __name__ == "__main__": main()
134
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class VQEncoderOutput(BaseOutput): """Output of VQModel.encode, holding the (pre-quantization) latents.""" latents: torch.FloatTensor class VQModel(ModelMixin, ConfigMixin): @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group", ): super().__init__() # pass init params to Encoder self.encoder = Encoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, ) vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1) self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False) self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1) # pass init params to Decoder self.decoder = Decoder( in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, ) @apply_forward_hook def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput: h = self.encoder(x) h = self.quant_conv(h) if not return_dict: return (h,) return VQEncoderOutput(latents=h) @apply_forward_hook def decode( self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: quant, emb_loss, info = self.quantize(h) else: quant = h quant2 = self.post_quant_conv(quant) dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None) if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: x = sample h = self.encode(x).latents dec = self.decode(h).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec)
295
0
"""simple docstring""" import argparse import importlib from pathlib import Path # Test all the extensions added in the setup __UpperCamelCase : Optional[Any] = [ '''kernels/rwkv/wkv_cuda.cu''', '''kernels/rwkv/wkv_op.cpp''', '''kernels/deformable_detr/ms_deform_attn.h''', '''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''', '''models/graphormer/algos_graphormer.pyx''', ] def __SCREAMING_SNAKE_CASE ( A_ ): for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": __UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''') __UpperCamelCase : Tuple = parser.parse_args() if args.check_lib: __UpperCamelCase : List[str] = importlib.import_module('''transformers''') __UpperCamelCase : int = Path(transformers_module.__file__).parent else: __UpperCamelCase : Dict = Path.cwd() / '''build/lib/transformers''' if not test_custom_files_are_present(transformers_path): raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
106
import os import numpy import onnx def _is_equal_tensor_proto(a, b): """Compare two TensorProtos while ignoring their names.""" name_a = a.name name_b = b.name a.name = "" b.name = "" res = a == b a.name = name_a b.name = name_b return res def _node_replace_input_with(node_proto, name, new_name): for i, input_name in enumerate(node_proto.input): if input_name == name: node_proto.input.insert(i, new_name) node_proto.input.pop(i + 1) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g, name, new_name) _graph_replace_input_with(node_proto.attribute[1].g, name, new_name) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g, name, new_name) def _graph_replace_input_with(graph_proto, name, new_name): for n in graph_proto.node: _node_replace_input_with(n, name, new_name) def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace): inits_with_data = list(model.graph.initializer) inits = list(model_without_ext.graph.initializer) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i name_i = inits[i].name name_ref = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i]) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph, name_i, name_ref) def remove_dup_initializers(onnx_file_path): """Remove duplicate initializers from an ONNX model and save the result.""" model_file_folder = os.path.dirname(onnx_file_path) model_file_name = os.path.basename(onnx_file_path) model = onnx.load(os.path.join(model_file_folder, model_file_name)) inits = list(model.graph.initializer) dup_set = set() dup_map = {} ind_to_replace = [] total_reduced_size = 0 for i in range(len(inits)): if i in dup_set: continue for j in range(i + 1, len(inits)): if j in dup_set: continue if _is_equal_tensor_proto(inits[i], inits[j]): dup_set.add(i) dup_set.add(j) dtype = inits[j].data_type mem_size = numpy.prod(inits[j].dims) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: ", dtype) total_reduced_size += mem_size name_i = inits[i].name name_j = inits[j].name if name_i in dup_map: dup_map[name_i].append(name_j) else: dup_map[name_i] = [name_j] ind_to_replace.append((j, i)) print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB") ind_to_replace = sorted(ind_to_replace) _remove_dup_initializers_from_model(model, model, ind_to_replace) optimized_model_file_name = "optimized_" + model_file_name new_model = os.path.join(model_file_folder, optimized_model_file_name) onnx.save(model, new_model) return new_model
295
0
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowerCamelCase =logging.getLogger(__name__) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" if os.path.exists(lowercase__ ): if os.path.exists(os.path.join(lowercase__, 'config.json' ) ) and os.path.isfile( os.path.join(lowercase__, 'config.json' ) ): os.remove(os.path.join(lowercase__, 'config.json' ) ) if os.path.exists(os.path.join(lowercase__, 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(lowercase__, 'pytorch_model.bin' ) ): os.remove(os.path.join(lowercase__, 'pytorch_model.bin' ) ) else: os.makedirs(lowercase__ ) model.save_pretrained(lowercase__ ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_=False ): """simple docstring""" SCREAMING_SNAKE_CASE =2 if unlogit: SCREAMING_SNAKE_CASE =torch.pow(lowercase__, lowercase__ ) SCREAMING_SNAKE_CASE =p * torch.log(lowercase__ ) SCREAMING_SNAKE_CASE =0 return -plogp.sum(dim=-1 ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" logger.info('lv, h >\t' + '\t'.join(F'{x + 1}' for x in range(len(lowercase__ ) ) ) ) for row in range(len(lowercase__ ) ): if tensor.dtype != torch.long: logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:.5f}' for x in tensor[row].cpu().data ) ) else: logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:d}' for x in tensor[row].cpu().data ) ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=True, lowerCAmelCase_=True, lowerCAmelCase_=None, lowerCAmelCase_=False ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =model.config.num_hidden_layers, model.config.num_attention_heads SCREAMING_SNAKE_CASE =torch.zeros(lowercase__, lowercase__ ).to(args.device ) SCREAMING_SNAKE_CASE =torch.zeros(lowercase__, lowercase__ ).to(args.device ) if head_mask is None: SCREAMING_SNAKE_CASE =torch.ones(lowercase__, lowercase__ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowercase__ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =0.0 SCREAMING_SNAKE_CASE =0.0 for step, inputs in enumerate(tqdm(lowercase__, desc='Iteration', disable=args.local_rank not in [-1, 0] ) ): SCREAMING_SNAKE_CASE =tuple(t.to(args.device ) for t in inputs ) ((SCREAMING_SNAKE_CASE ) , ) =inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) SCREAMING_SNAKE_CASE =model(lowercase__, labels=lowercase__, head_mask=lowercase__ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowercase__ ): SCREAMING_SNAKE_CASE =entropy(attn.detach(), lowercase__ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowercase__ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not 
args.dont_normalize_importance_by_layer: SCREAMING_SNAKE_CASE =2 SCREAMING_SNAKE_CASE =torch.pow(torch.pow(lowercase__, lowercase__ ).sum(-1 ), 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: SCREAMING_SNAKE_CASE =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(lowercase__ ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(lowercase__ ) logger.info('Head ranked by importance scores' ) SCREAMING_SNAKE_CASE =torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device ) SCREAMING_SNAKE_CASE =torch.arange( head_importance.numel(), device=args.device ) SCREAMING_SNAKE_CASE =head_ranks.view_as(lowercase__ ) print_ad_tensor(lowercase__ ) return attn_entropy, head_importance, total_loss def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =compute_heads_importance(lowercase__, lowercase__, lowercase__, compute_entropy=lowercase__ ) SCREAMING_SNAKE_CASE =1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f', lowercase__, original_score * args.masking_threshold ) SCREAMING_SNAKE_CASE =torch.ones_like(lowercase__ ) SCREAMING_SNAKE_CASE =max(1, int(new_head_mask.numel() * args.masking_amount ) ) SCREAMING_SNAKE_CASE =original_score while current_score >= original_score * args.masking_threshold: SCREAMING_SNAKE_CASE =new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads SCREAMING_SNAKE_CASE =float('Inf' ) SCREAMING_SNAKE_CASE =head_importance.view(-1 ).sort()[1] if len(lowercase__ ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads SCREAMING_SNAKE_CASE =current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist() ) ) SCREAMING_SNAKE_CASE =new_head_mask.view(-1 ) SCREAMING_SNAKE_CASE =0.0 SCREAMING_SNAKE_CASE =new_head_mask.view_as(lowercase__ ) SCREAMING_SNAKE_CASE =new_head_mask.clone().detach() print_ad_tensor(lowercase__ ) # Compute metric and head importance again SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =compute_heads_importance( lowercase__, lowercase__, lowercase__, compute_entropy=lowercase__, head_mask=lowercase__ ) SCREAMING_SNAKE_CASE =1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)', lowercase__, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, ) logger.info('Final head mask' ) print_ad_tensor(lowercase__ ) np.save(os.path.join(args.output_dir, 'head_mask.npy' ), head_mask.detach().cpu().numpy() ) return head_mask def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =datetime.now() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =compute_heads_importance( lowercase__, lowercase__, lowercase__, compute_entropy=lowercase__, compute_importance=lowercase__, head_mask=lowercase__ ) SCREAMING_SNAKE_CASE =1 / loss SCREAMING_SNAKE_CASE =datetime.now() - before_time SCREAMING_SNAKE_CASE =sum(p.numel() for p in model.parameters() ) SCREAMING_SNAKE_CASE ={ layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowercase__ ) ) } for k, v in 
heads_to_prune.items(): if isinstance(lowercase__, lowercase__ ): SCREAMING_SNAKE_CASE =[ v, ] assert sum(len(lowercase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowercase__ ) SCREAMING_SNAKE_CASE =sum(p.numel() for p in model.parameters() ) SCREAMING_SNAKE_CASE =datetime.now() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =compute_heads_importance( lowercase__, lowercase__, lowercase__, compute_entropy=lowercase__, compute_importance=lowercase__, head_mask=lowercase__, actually_pruned=lowercase__, ) SCREAMING_SNAKE_CASE =1 / loss SCREAMING_SNAKE_CASE =datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)', lowercase__, lowercase__, pruned_num_params / original_num_params * 100, ) logger.info('Pruning: score with masking: %f score with pruning: %f', lowercase__, lowercase__ ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 100 ) save_model(lowercase__, args.output_dir ) def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir', default=lowercase__, type=lowercase__, required=lowercase__, help='The input data dir. Should contain the .tsv files (or other data files) for the task.', ) parser.add_argument( '--model_name_or_path', default=lowercase__, type=lowercase__, required=lowercase__, help='Path to pretrained model or model identifier from huggingface.co/models', ) parser.add_argument( '--output_dir', default=lowercase__, type=lowercase__, required=lowercase__, help='The output directory where the model predictions and checkpoints will be written.', ) # Other parameters parser.add_argument( '--config_name', default='', type=lowercase__, help='Pretrained config name or path if not the same as model_name_or_path', ) parser.add_argument( '--tokenizer_name', default='', type=lowercase__, help='Pretrained tokenizer name or path if not the same as model_name_or_path', ) parser.add_argument( '--cache_dir', default=lowercase__, type=lowercase__, help='Where do you want to store the pre-trained models downloaded from s3', ) parser.add_argument( '--data_subset', type=lowercase__, default=-1, help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer', action='store_true', help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance', action='store_true', help='Don\'t normalize all importance scores between 0 and 1', ) parser.add_argument( '--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold', default=0.9, type=lowercase__, help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).', ) parser.add_argument( '--masking_amount', default=0.1, type=lowercase__, help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name', default='acc', type=lowercase__, help='Metric to use for head masking.' 
) parser.add_argument( '--max_seq_length', default=128, type=lowercase__, help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ), ) parser.add_argument('--batch_size', default=1, type=lowercase__, help='Batch size.' ) parser.add_argument('--seed', type=lowercase__, default=42 ) parser.add_argument('--local_rank', type=lowercase__, default=-1, help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip', type=lowercase__, default='', help='Can be used for distant debugging.' ) parser.add_argument('--server_port', type=lowercase__, default='', help='Can be used for distant debugging.' ) SCREAMING_SNAKE_CASE =parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=lowercase__ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: SCREAMING_SNAKE_CASE =torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) SCREAMING_SNAKE_CASE =0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) SCREAMING_SNAKE_CASE =torch.device('cuda', args.local_rank ) SCREAMING_SNAKE_CASE =1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1 ) ) ) SCREAMING_SNAKE_CASE =GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: SCREAMING_SNAKE_CASE =nn.parallel.DistributedDataParallel( lowercase__, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=lowercase__ ) elif args.n_gpu > 1: SCREAMING_SNAKE_CASE =nn.DataParallel(lowercase__ ) # Print/save training arguments os.makedirs(args.output_dir, exist_ok=lowercase__ ) torch.save(lowercase__, os.path.join(args.output_dir, 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s', lowercase__ ) # Prepare dataset SCREAMING_SNAKE_CASE =np.concatenate( [ np.loadtxt(args.data_dir, dtype=np.intaa ), ] ) SCREAMING_SNAKE_CASE =(torch.from_numpy(lowercase__ ),) SCREAMING_SNAKE_CASE =TensorDataset(*lowercase__ ) SCREAMING_SNAKE_CASE =RandomSampler(lowercase__ ) SCREAMING_SNAKE_CASE =DataLoader(lowercase__, sampler=lowercase__, batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowercase__, lowercase__, lowercase__ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: SCREAMING_SNAKE_CASE =mask_heads(lowercase__, lowercase__, lowercase__ ) prune_heads(lowercase__, lowercase__, lowercase__, lowercase__ ) if __name__ == "__main__": main()
334
import argparse import importlib from pathlib import Path # Test all the extensions added in the setup lowerCAmelCase = [ '''kernels/rwkv/wkv_cuda.cu''', '''kernels/rwkv/wkv_op.cpp''', '''kernels/deformable_detr/ms_deform_attn.h''', '''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''', '''models/graphormer/algos_graphormer.pyx''', ] def _lowerCamelCase( lowercase__ ) -> str: '''simple docstring''' for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''') lowerCAmelCase = parser.parse_args() if args.check_lib: lowerCAmelCase = importlib.import_module('''transformers''') lowerCAmelCase = Path(transformers_module.__file__).parent else: lowerCAmelCase = Path.cwd() / '''build/lib/transformers''' if not test_custom_files_are_present(transformers_path): raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
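The check above is a plain existence test over a file list. A self-contained sketch of the same pattern, with a hypothetical root and a shortened file list (both are assumptions for illustration):

from pathlib import Path

required = ["kernels/rwkv/wkv_cuda.cu", "models/graphormer/algos_graphormer.pyx"]
root = Path("build/lib/transformers")  # hypothetical build location
missing = [f for f in required if not (root / f).exists()]
if missing:
    raise ValueError(f"Missing custom files: {missing}")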
295
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCAmelCase = { '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
196
from __future__ import annotations def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any: '''simple docstring''' if len(lowercase__ ) <= 1 or n <= 1: return insert_next(lowercase__ , n - 1 ) rec_insertion_sort(lowercase__ , n - 1 ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any: '''simple docstring''' if index >= len(lowercase__ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order __lowercase, __lowercase= ( collection[index], collection[index - 1], ) insert_next(lowercase__ , index + 1 ) if __name__ == "__main__": lowerCAmelCase = input('''Enter integers separated by spaces: ''') lowerCAmelCase = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
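Note that the two minified definitions above shadow each other (both are named _lowerCamelCase) while their own call sites expect rec_insertion_sort and insert_next. A restored, runnable pairing under that assumption:

def rec_insertion_sort(collection: list, n: int) -> None:
    # Recursively run one insertion/bubble pass per prefix length.
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)

def insert_next(collection: list, index: int) -> None:
    # Swap out-of-order neighbours and continue rightward (one bubble pass).
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    insert_next(collection, index + 1)

nums = [5, 2, 4, 1, 3]
rec_insertion_sort(nums, len(nums))
print(nums)  # [1, 2, 3, 4, 5]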
295
0
'''simple docstring''' __snake_case = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __snake_case = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __snake_case = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def a ( __a , __a , __a ) -> str: '''simple docstring''' assert len(str(lowercase__ ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: UpperCamelCase__ :str = year // 100 UpperCamelCase__ :List[Any] = (5 * (century % 4) + 2) % 7 UpperCamelCase__ :Dict = year % 100 UpperCamelCase__ :str = centurian % 12 UpperCamelCase__ :List[str] = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 UpperCamelCase__ :Optional[Any] = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0) else DOOMSDAY_LEAP[month - 1] ) UpperCamelCase__ :List[str] = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
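A quick cross-check of the calendar logic, using only the standard library so it runs independently of the minified names above (2024 is a leap year, and its doomsday falls on a Thursday):

import datetime

print(datetime.date(2024, 1, 1).strftime("%A"))  # Monday
# With its minified local names restored, the function above (minified to `a`)
# should agree: get_week_day(2024, 1, 1) -> 'Monday'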
97
def _lowerCamelCase( lowercase__ , lowercase__ = " " ) -> list: '''simple docstring''' __lowercase= [] __lowercase= 0 for index, char in enumerate(lowercase__ ): if char == separator: split_words.append(string[last_index:index] ) __lowercase= index + 1 elif index + 1 == len(lowercase__ ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
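The minified signature above collapses both parameters into the same name, which is a syntax error in Python, so here is a restored, runnable version for illustration; the parameter names string and separator are assumptions:

def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words

print(split("apple banana cherry"))  # ['apple', 'banana', 'cherry']
print(split("a,b,c", ","))           # ['a', 'b', 'c']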
295
0
import pprint import requests _UpperCAmelCase = """https://zenquotes.io/api""" def UpperCamelCase ( ): '''simple docstring''' return requests.get(API_ENDPOINT_URL + '/today' ).json() def UpperCamelCase ( ): '''simple docstring''' return requests.get(API_ENDPOINT_URL + '/random' ).json() if __name__ == "__main__": _UpperCAmelCase = random_quotes() pprint.pprint(response)
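A hedged sketch of consuming one response from the helpers above; the field names ("q" for quote, "a" for author) follow the public Zen Quotes API and should be treated as assumptions here, as should the restored function name random_quotes that the file's own call site uses:

quotes = random_quotes()  # returns a list of quote objects
if quotes:
    print(f"{quotes[0]['q']} - {quotes[0]['a']}")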
140
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict: '''simple docstring''' return field(default_factory=lambda: default , metadata=lowercase__ ) @dataclass class A : UpperCamelCase_ : str =field( metadata={'''help''': '''The csv file to plot.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={ '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.''' } , ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , ) UpperCamelCase_ : Optional[List[str]] =list_field( default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: int(lowercase__ ) return True except ValueError: return False def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: float(lowercase__ ) return True except ValueError: return False class A : def __init__(self , lowerCAmelCase ): __lowercase= args __lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='' ) as csv_file: __lowercase= csv.DictReader(lowerCAmelCase ) for row in reader: __lowercase= row['model'] self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) ) self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) ) if can_convert_to_int(row['result'] ): # value is not None __lowercase= int(row['result'] ) elif can_convert_to_float(row['result'] ): # value is not None __lowercase= float(row['result'] ) def _A (self ): __lowercase, __lowercase= plt.subplots() __lowercase= 'Time usage' if self.args.is_time else 'Memory usage' __lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('log' ) ax.set_yscale('log' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): __lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) ) __lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) ) __lowercase= self.result_dict[model_name]['result'] ((__lowercase), (__lowercase))= ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) __lowercase= ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: __lowercase= np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , ) else: __lowercase= np.asarray( [results[(inner_loop_value, x)] for x in 
x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((__lowercase), (__lowercase))= ( ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz') ) __lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )] plt.scatter( lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' ) plt.plot(lowerCAmelCase , lowerCAmelCase , '--' ) title_str += f' {label_model_name} vs.' __lowercase= title_str[:-4] __lowercase= 'Time in s' if self.args.is_time else 'Memory in MB' # plot plt.title(lowerCAmelCase ) plt.xlabel(lowerCAmelCase ) plt.ylabel(lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def _lowerCamelCase( ) -> str: '''simple docstring''' __lowercase= HfArgumentParser(lowercase__ ) __lowercase= parser.parse_args_into_dataclasses()[0] __lowercase= Plot(args=lowercase__ ) plot.plot() if __name__ == "__main__": main()
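The reader above keys each row by the columns model, batch_size, sequence_length and result. A minimal illustration of the expected CSV shape, parsed the same way (the values are made up):

import csv
import io

sample = "model,batch_size,sequence_length,result\nbert-base,8,128,1421\nbert-base,8,512,5512\n"
for row in csv.DictReader(io.StringIO(sample)):
    print(row["model"], row["batch_size"], row["sequence_length"], row["result"])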
295
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''', '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''', '''kssteven/ibert-roberta-large-mnli''': ( '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json''' ), } class lowerCamelCase_ ( A_ ): lowerCAmelCase__ = '''ibert''' def __init__( self : str , _A : int=30_522 , _A : Optional[int]=768 , _A : List[Any]=12 , _A : Tuple=12 , _A : Dict=3_072 , _A : List[Any]="gelu" , _A : List[Any]=0.1 , _A : Tuple=0.1 , _A : Dict=512 , _A : Union[str, Any]=2 , _A : Any=0.0_2 , _A : int=1e-12 , _A : List[Any]=1 , _A : Optional[Any]=0 , _A : Optional[int]=2 , _A : Any="absolute" , _A : Optional[Any]=False , _A : List[str]="none" , **_A : int , ): '''simple docstring''' super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : Dict = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : List[str] = type_vocab_size UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : List[str] = position_embedding_type UpperCAmelCase__ : str = quant_mode UpperCAmelCase__ : Dict = force_dequant class lowerCamelCase_ ( A_ ): @property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase__ : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase__ : str = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
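The two classes above are minified to the same name; upstream they correspond to IBertConfig and its ONNX config (an assumption based on the 'ibert' model_type and the pretrained-config URLs). A short instantiation sketch using the upstream name:

from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="none")
print(config.hidden_size)  # 768 by default, per the signature above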
181
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class A ( A_ ): UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int =DPRContextEncoderTokenizer class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer lowerCAmelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(A_ ) class A : def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) elif titles is None or texts is None: __lowercase= titles if texts is None else texts return super().__call__( lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles] __lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts] __lowercase= len(lowerCAmelCase ) __lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.' 
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
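A usage sketch of the reader tokenizer documented above, via its upstream name DPRReaderTokenizerFast (an assumption; the class here is minified). The keyword names follow the docstring, and the checkpoint is one of those referenced in this file:

from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is love?",
    titles="Haddaway",
    texts="'What Is Love' is a song recorded by Haddaway",
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # (n_passages, sequence_length)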
295
0
'''simple docstring''' from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def UpperCamelCase_ ( ) -> Tuple: '''simple docstring''' __lowerCAmelCase = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" ) __lowerCAmelCase = parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(lowercase__ ) DownloadCommand.register_subcommand(lowercase__ ) EnvironmentCommand.register_subcommand(lowercase__ ) RunCommand.register_subcommand(lowercase__ ) ServeCommand.register_subcommand(lowercase__ ) UserCommands.register_subcommand(lowercase__ ) AddNewModelCommand.register_subcommand(lowercase__ ) AddNewModelLikeCommand.register_subcommand(lowercase__ ) LfsCommands.register_subcommand(lowercase__ ) PTtoTFCommand.register_subcommand(lowercase__ ) # Let's go __lowerCAmelCase = parser.parse_args() if not hasattr(lowercase__ , """func""" ): parser.print_help() exit(1 ) # Run __lowerCAmelCase = args.func(lowercase__ ) service.run() if __name__ == "__main__": main()
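The CLI above follows a register-then-dispatch pattern: every command contributes a subparser and binds itself as func. The same pattern in miniature, as a self-contained sketch:

from argparse import ArgumentParser

parser = ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
greet = subparsers.add_parser("greet")
greet.set_defaults(func=lambda args: print("hello"))
args = parser.parse_args(["greet"])
args.func(args)  # prints "hello"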
229
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A ( nn.Module ): def __init__(self ): super().__init__() __lowercase= nn.Linear(3 , 4 ) __lowercase= nn.BatchNormad(4 ) __lowercase= nn.Linear(4 , 5 ) def _A (self , lowerCAmelCase ): return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) ) class A ( A_ ): def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): return (args[0] + 1,) + args[1:], kwargs class A ( A_ ): def _A (self , lowerCAmelCase , lowerCAmelCase ): return output + 1 class A ( unittest.TestCase ): def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(test_model._hf_hook , lowerCAmelCase ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase ) self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(x + 1 ) __lowercase= test_model(x + 2 ) __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PostForwardHook() 
add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) ) self.assertTrue(outputa.requires_grad ) __lowercase= True __lowercase= test_model(lowerCAmelCase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) ) __lowercase= torch.randn(2 , 3 ).to(0 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(0 ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload __lowercase= { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
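The tests above revolve around a small number of calls from accelerate.hooks. Their basic shape in isolation, as a minimal sketch:

import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

layer = nn.Linear(3, 4)
add_hook_to_module(layer, ModelHook())  # wraps layer.forward, keeping _old_forward
remove_hook_from_module(layer)          # restores the original forward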
295
0
'''simple docstring''' def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = [False] * len(lowercase__ ) __lowercase = [] queue.append(lowercase__ ) __lowercase = True while queue: __lowercase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) __lowercase = True __lowercase = u return visited[t] def _A ( A__ , A__ , A__ ): """simple docstring""" __lowercase = [-1] * (len(lowercase__ )) __lowercase = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowercase = float('''Inf''' ) __lowercase = sink while s != source: # Find the minimum value in select path __lowercase = min(lowercase__ , graph[parent[s]][s] ) __lowercase = parent[s] max_flow += path_flow __lowercase = sink while v != source: __lowercase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __lowercase = parent[v] return max_flow lowerCAmelCase__ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] lowerCAmelCase__ , lowerCAmelCase__ = 0, 5 print(ford_fulkerson(graph, source, sink))
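For the sample network above (the classic CLRS example) the program prints a maximum flow of 23. A second, tiny network to exercise the routine, assuming the minified definitions and local names are restored to what their call sites use (bfs, ford_fulkerson): two disjoint augmenting paths with capacities 3 and 2, so the maximum flow is 5.

tiny = [
    [0, 3, 2, 0],
    [0, 0, 0, 3],
    [0, 0, 0, 2],
    [0, 0, 0, 0],
]
print(ford_fulkerson(tiny, 0, 3))  # 5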
104
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class A ( unittest.TestCase ): def _A (self ): __lowercase= logging.get_logger() # the current default level is logging.WARNING __lowercase= logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) def _A (self ): __lowercase= logging.get_verbosity() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase ) __lowercase= logging.log_levels[env_level_str] __lowercase= logging.get_verbosity() self.assertEqual( lowerCAmelCase , lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level __lowercase= '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __lowercase= logging.logging.getLogger() with CaptureLogger(lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def _A (self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , '' ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
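The verbosity and advisory-warning behaviour tested above maps onto a small public API; typical application-side usage, as a sketch:

from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
logger.info("now visible at INFO level")
logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS=1")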
295
0
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE__ ( A_ ): '''simple docstring''' pass class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self, lowerCamelCase__ ): A : List[Any] = data A : Tuple = None def __iter__( self ): A : int = self A : int = [] while node: if node in visited: raise ContainsLoopError visited.append(lowerCamelCase__ ) yield node.data A : Union[str, Any] = node.next_node @property def _lowerCAmelCase ( self ): try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": SCREAMING_SNAKE_CASE_:List[Any] = Node(1) SCREAMING_SNAKE_CASE_:Union[str, Any] = Node(2) SCREAMING_SNAKE_CASE_:int = Node(3) SCREAMING_SNAKE_CASE_:Union[str, Any] = Node(4) print(root_node.has_loop) # False SCREAMING_SNAKE_CASE_:str = root_node.next_node print(root_node.has_loop) # True SCREAMING_SNAKE_CASE_:Dict = Node(5) SCREAMING_SNAKE_CASE_:Any = Node(6) SCREAMING_SNAKE_CASE_:Optional[Any] = Node(5) SCREAMING_SNAKE_CASE_:List[str] = Node(6) print(root_node.has_loop) # False SCREAMING_SNAKE_CASE_:str = Node(1) print(root_node.has_loop) # False
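The has_loop property above records every visited node in a list and tests membership on each step, which is O(n^2) time and O(n) memory. Floyd's two-pointer check trades that for O(n) time and O(1) memory; a sketch against the same next_node node shape:

def has_loop_floyd(head) -> bool:
    # Advance one pointer by one step and another by two;
    # they can only meet inside a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False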
116
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase = '''▁''' lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''} } lowerCAmelCase = { '''google/pegasus-xsum''': 5_1_2, } lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =VOCAB_FILES_NAMES UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=1_0_3 , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= offset if additional_special_tokens is not None: if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise TypeError( f'additional_special_tokens should be of type {type(lowerCAmelCase )}, but is' f' {type(lowerCAmelCase )}' ) __lowercase= ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'<unk_{i}>' for i in range(len(lowerCAmelCase ) , self.offset - 1 ) ] if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' 
) __lowercase= additional_special_tokens_extended else: __lowercase= [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )] __lowercase= {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) __lowercase= mask_token_sent __lowercase= vocab_file __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) # add special tokens to encoder dict __lowercase= { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) __lowercase= {v: k for k, v in self.encoder.items()} @property def _A (self ): return len(self.sp_model ) + self.offset def _A (self ): __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): __lowercase= self.__dict__.copy() __lowercase= None return state def __setstate__(self , lowerCAmelCase ): __lowercase= d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase= {} __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _A (self , lowerCAmelCase ): return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase ) def _A (self , lowerCAmelCase ): if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] __lowercase= self.sp_model.piece_to_id(lowerCAmelCase ) return sp_id + self.offset def _A (self , lowerCAmelCase ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: __lowercase= self.sp_model.IdToPiece(index - self.offset ) return token def _A (self , lowerCAmelCase ): __lowercase= [] __lowercase= '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token __lowercase= [] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def _A (self , lowerCAmelCase=False ): return 1 def _A (self , lowerCAmelCase ): __lowercase= set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): if already_has_special_tokens: return self._special_token_mask(lowerCAmelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A (self , lowerCAmelCase , lowerCAmelCase=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to 
process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase , 'wb' ) as fi: __lowercase= self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
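A usage sketch via the upstream class name PegasusTokenizer (an assumption; the class above is minified to A), with the checkpoint already referenced in this file:

from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
ids = tokenizer("PEGASUS was pretrained with gap-sentence generation.").input_ids
print(tokenizer.decode(ids))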
295
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ : int = { 'configuration_clip': [ 'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CLIPConfig', 'CLIPOnnxConfig', 'CLIPTextConfig', 'CLIPVisionConfig', ], 'processing_clip': ['CLIPProcessor'], 'tokenization_clip': ['CLIPTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Dict = ['CLIPTokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Tuple = ['CLIPFeatureExtractor'] lowercase_ : Tuple = ['CLIPImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : int = [ 'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'CLIPModel', 'CLIPPreTrainedModel', 'CLIPTextModel', 'CLIPTextModelWithProjection', 'CLIPVisionModel', 'CLIPVisionModelWithProjection', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : str = [ 'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFCLIPModel', 'TFCLIPPreTrainedModel', 'TFCLIPTextModel', 'TFCLIPVisionModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : int = [ 'FlaxCLIPModel', 'FlaxCLIPPreTrainedModel', 'FlaxCLIPTextModel', 'FlaxCLIPTextPreTrainedModel', 'FlaxCLIPVisionModel', 'FlaxCLIPVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys lowercase_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= self.vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= self.num_labels __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Tuple =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase_ : List[str] =( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= inputs_dict['labels'] __lowercase= inputs_dict['labels'] __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= OpenAIGPTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is __lowercase= [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
'''simple docstring''' from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowerCamelCase : '''simple docstring''' def __init__( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : str=13 , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Tuple=99 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : List[str]=37 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Any=5_12 , lowerCAmelCase_ : Union[str, Any]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]="None" , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : Tuple=None , ) -> Optional[Any]: '''simple docstring''' A__ : Any =parent A__ : Any =batch_size A__ : Tuple =seq_length A__ : Tuple =is_training A__ : List[Any] =use_input_mask A__ : int =use_token_type_ids A__ : List[Any] =use_labels A__ : List[str] =vocab_size A__ : Optional[int] =hidden_size A__ : Optional[int] =num_hidden_layers A__ : str =num_attention_heads A__ : Tuple =intermediate_size A__ : Union[str, Any] =hidden_act A__ : Tuple =hidden_dropout_prob A__ : Optional[Any] =attention_probs_dropout_prob A__ : Tuple =max_position_embeddings A__ : str =type_vocab_size A__ : Dict =type_sequence_label_size A__ : List[Any] =initializer_range A__ : Any =num_labels A__ : Optional[Any] =num_choices A__ : Optional[int] =relative_attention A__ : Tuple =position_biased_input A__ : Union[str, Any] =pos_att_type A__ : Union[str, Any] =scope def lowercase__ ( self : Dict ) -> str: '''simple docstring''' A__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Tuple =None if self.use_input_mask: A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] ) A__ : Dict =None if self.use_token_type_ids: A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ : Union[str, Any] =None A__ : List[Any] =None A__ : Optional[Any] =None if self.use_labels: A__ : str =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : List[Any] =DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , 
initializer_range=self.initializer_range , return_dict=lowerCAmelCase_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: '''simple docstring''' A__ : List[str] =TFDebertaVaModel(config=lowerCAmelCase_ ) A__ : Any ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} A__ : List[str] =[input_ids, input_mask] A__ : Optional[int] =model(lowerCAmelCase_ ) A__ : Union[str, Any] =model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple ) -> Optional[int]: '''simple docstring''' A__ : List[Any] =TFDebertaVaForMaskedLM(config=lowerCAmelCase_ ) A__ : Optional[int] ={ """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } A__ : List[str] =model(lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict ) -> List[str]: '''simple docstring''' A__ : Any =self.num_labels A__ : List[str] =TFDebertaVaForSequenceClassification(config=lowerCAmelCase_ ) A__ : Optional[int] ={ """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } A__ : int =model(lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> Any: '''simple docstring''' A__ : int =self.num_labels A__ : Tuple =TFDebertaVaForTokenClassification(config=lowerCAmelCase_ ) A__ : int ={ """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } A__ : str =model(lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[str]: '''simple docstring''' A__ : Optional[Any] =TFDebertaVaForQuestionAnswering(config=lowerCAmelCase_ ) A__ : List[Any] ={ """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } A__ : Union[str, Any] =model(lowerCAmelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' A__ : Union[str, Any] =self.prepare_config_and_inputs() ( ( A__ ) 
, ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) : str =config_and_inputs A__ : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase ( A_ , A_ , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) __snake_case = ( { '''feature-extraction''': TFDebertaVaModel, '''fill-mask''': TFDebertaVaForMaskedLM, '''question-answering''': TFDebertaVaForQuestionAnswering, '''text-classification''': TFDebertaVaForSequenceClassification, '''token-classification''': TFDebertaVaForTokenClassification, '''zero-shot''': TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) __snake_case = False __snake_case = False def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' A__ : Any =TFDebertaVaModelTester(self ) A__ : str =ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def lowercase__ ( self : Dict ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : Any ) -> List[str]: '''simple docstring''' A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowercase__ ( self : int ) -> str: '''simple docstring''' A__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ ) def lowercase__ ( self : int ) -> List[str]: '''simple docstring''' A__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ ) def lowercase__ ( self : str ) -> Optional[int]: '''simple docstring''' A__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ ) def lowercase__ ( self : int ) -> int: '''simple docstring''' A__ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ ) @slow def lowercase__ ( self : Tuple ) -> List[str]: '''simple docstring''' A__ : str =TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" ) self.assertIsNotNone(lowerCAmelCase_ ) @require_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason="""Model not available yet""" ) def lowercase__ ( self : str ) -> Tuple: '''simple docstring''' pass @slow def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' A__ : Tuple =TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" ) A__ : Union[str, Any] =tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) A__ : str =tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A__ : Optional[int] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] A__ : Optional[Any] =tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 )
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial-division primality test for ``number`` >= 2."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` expressible as a difference of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
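# Why `prime_candidate += 6 * cube_index` enumerates cube differences:
# (n + 1)**3 - n**3 == 3*n*n + 3*n + 1 =: f(n), and consecutive values of this
# polynomial differ by f(n) - f(n - 1) == 6*n, starting from f(1) == 7.
# A quick check of both identities:
f = lambda k: 3 * k * k + 3 * k + 1
for n in range(1, 6):
    assert (n + 1) ** 3 - n ** 3 == f(n)
    assert f(n) - f(n - 1) == 6 * n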
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __UpperCamelCase : Optional[Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class SCREAMING_SNAKE_CASE : """simple docstring""" lowercase__ = field( default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} ) lowercase__ = field( default=A_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) lowercase__ = field( default=A_ , metadata={"help": "The column name of the images in the files."} ) lowercase__ = field(default=A_ , metadata={"help": "A folder containing the training data."} ) lowercase__ = field(default=A_ , metadata={"help": "A folder containing the validation data."} ) lowercase__ = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) lowercase__ = field( default=A_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) lowercase__ = field( default=A_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def __lowerCAmelCase ( self : Union[str, Any] ): lowerCAmelCase__ : int = {} if self.train_dir is not None: lowerCAmelCase__ : Any = self.train_dir if self.validation_dir is not None: lowerCAmelCase__ : Tuple = self.validation_dir lowerCAmelCase__ : str = data_files if data_files else None @dataclass class SCREAMING_SNAKE_CASE : """simple docstring""" lowercase__ = field( default=A_ , metadata={ "help": ( "The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch." ) } , ) lowercase__ = field( default=A_ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) lowercase__ = field( default=A_ , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) lowercase__ = field( default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) lowercase__ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) lowercase__ = field(default=A_ , metadata={"help": "Name or path of preprocessor config."} ) lowercase__ = field( default=A_ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } , ) lowercase__ = field( default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) lowercase__ = field( default=A_ , metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class SCREAMING_SNAKE_CASE ( A_ ): """simple docstring""" lowercase__ = field( default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : List[Any] = torch.stack([example['''pixel_values'''] for example in examples] ) return {"pixel_values": pixel_values} def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mae''' , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowerCAmelCase__ : Any = training_args.get_process_log_level() logger.setLevel(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. lowerCAmelCase__ : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCAmelCase__ : List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. lowerCAmelCase__ : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
lowerCAmelCase__ : Tuple = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0: lowerCAmelCase__ : str = ds['''train'''].train_test_split(data_args.train_val_split ) lowerCAmelCase__ : int = split['''train'''] lowerCAmelCase__ : Tuple = split['''test'''] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCAmelCase__ : Tuple = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: lowerCAmelCase__ : Optional[int] = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: lowerCAmelCase__ : Dict = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: lowerCAmelCase__ : int = ViTMAEConfig() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(f'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(f'New config: {config}' ) # adapt config config.update( { '''mask_ratio''': model_args.mask_ratio, '''norm_pix_loss''': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: lowerCAmelCase__ : Union[str, Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ ) elif model_args.model_name_or_path: lowerCAmelCase__ : Optional[Any] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: lowerCAmelCase__ : Any = ViTImageProcessor() # create model if model_args.model_name_or_path: lowerCAmelCase__ : List[Any] = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) lowerCAmelCase__ : Union[str, Any] = ViTMAEForPreTraining(lowercase__ ) if training_args.do_train: lowerCAmelCase__ : Dict = ds['''train'''].column_names else: lowerCAmelCase__ : Optional[int] = ds['''validation'''].column_names if data_args.image_column_name is not None: lowerCAmelCase__ : List[str] = data_args.image_column_name elif "image" in column_names: lowerCAmelCase__ : List[str] = '''image''' elif "img" in column_names: lowerCAmelCase__ : Dict = '''img''' else: lowerCAmelCase__ : List[str] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: lowerCAmelCase__ : Optional[int] = image_processor.size['''shortest_edge'''] else: lowerCAmelCase__ : Union[str, Any] = (image_processor.size['''height'''], image_processor.size['''width''']) lowerCAmelCase__ : Optional[Any] = Compose( [ Lambda(lambda A_ : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(A_ ): lowerCAmelCase__ : int = [transforms(lowercase__ 
) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: lowerCAmelCase__ : List[Any] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: lowerCAmelCase__ : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase__ ) # Compute absolute learning rate lowerCAmelCase__ : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: lowerCAmelCase__ : str = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer lowerCAmelCase__ : List[str] = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: lowerCAmelCase__ : str = None if training_args.resume_from_checkpoint is not None: lowerCAmelCase__ : Dict = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowerCAmelCase__ : Tuple = last_checkpoint lowerCAmelCase__ : Tuple = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowerCAmelCase__ : Dict = trainer.evaluate() trainer.log_metrics('''eval''' , lowercase__ ) trainer.save_metrics('''eval''' , lowercase__ ) # Write model card and (optionally) push to hub lowerCAmelCase__ : Optional[int] = { '''tasks''': '''masked-auto-encoding''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-auto-encoding'''], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) def __SCREAMING_SNAKE_CASE ( A_ ): main() if __name__ == "__main__": main()
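# How the absolute learning rate above is derived (the linear scaling rule):
# absolute_lr = base_lr * total_train_batch_size / 256. The numbers below are
# hypothetical, purely for illustration:
base_learning_rate = 1e-3
per_device_batch_size = 64
gradient_accumulation_steps = 2
world_size = 4
total_train_batch_size = per_device_batch_size * gradient_accumulation_steps * world_size  # 512
absolute_lr = base_learning_rate * total_train_batch_size / 256
print(absolute_lr)  # 0.002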
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of ``n`` in non-decreasing order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
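# Trial division runs in O(sqrt(n)) and emits factors in non-decreasing order.
# A few illustrative calls against the function above:
assert prime_factors(12) == [2, 2, 3]
assert prime_factors(97) == [97]              # a prime returns itself
assert prime_factors(2 * 3 * 5 * 7) == [2, 3, 5, 7]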
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
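# A minimal usage sketch via the public package (equivalent to the class above,
# assuming a transformers install that ships DeiT):
from transformers import DeiTConfig

config = DeiTConfig()
assert config.model_type == "deit"
assert config.image_size // config.patch_size == 14  # 14 x 14 patch grid at 224 px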
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase = None lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase = { '''t5-small''': 5_1_2, '''t5-base''': 5_1_2, '''t5-large''': 5_1_2, '''t5-3b''': 5_1_2, '''t5-11b''': 5_1_2, } class A ( A_ ): UpperCamelCase_ : Dict =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : List[str] =TaTokenizer UpperCamelCase_ : List[int] =[] def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=1_0_0 , lowerCAmelCase=None , **lowerCAmelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __lowercase= [f'<extra_id_{i}>' for i in range(lowerCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens __lowercase= len(set(filter(lambda lowerCAmelCase : bool('extra_id_' in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= vocab_file __lowercase= False if not self.vocab_file else True __lowercase= extra_ids @staticmethod def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: __lowercase= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase , ) return max_model_length def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ): copyfile(self.vocab_file , lowerCAmelCase ) logger.info(f'Copy vocab file to {out_vocab_file}' ) return (out_vocab_file,) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: __lowercase= token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _A (self ): return list( set(filter(lambda lowerCAmelCase : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def _A (self ): return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
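# Illustrative behaviour of the tokenizer above, assuming the standard
# `t5-small` checkpoint and a recent transformers release that exposes
# `get_sentinel_tokens`: extra_ids=100 registers <extra_id_0> ... <extra_id_99>
# as sentinel tokens, and </s> (eos) is appended to every encoded sequence.
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
ids = tok("translate English to German: hello")["input_ids"]
assert ids[-1] == tok.eos_token_id          # eos appended automatically
assert "<extra_id_0>" in tok.get_sentinel_tokens()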
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __lowerCAmelCase = logging.get_logger(__name__) @dataclass class __a ( A_ ): __lowercase : Optional[Any] = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self , **lowerCAmelCase__ ) -> Dict: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase__: Optional[Any] = deprecated_arg[3:] lowercase__: int = not kwargs.pop(lowerCAmelCase__ ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) lowercase__: Any = kwargs.pop('tpu_name' , self.tpu_name ) lowercase__: List[Any] = kwargs.pop('device_idx' , self.device_idx ) lowercase__: List[Any] = kwargs.pop('eager_mode' , self.eager_mode ) lowercase__: List[str] = kwargs.pop('use_xla' , self.use_xla ) super().__init__(**lowerCAmelCase__ ) __lowercase : str = field( default=A_ , metadata={'help': 'Name of TPU'} , ) __lowercase : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) __lowercase : bool = field(default=A_ , metadata={'help': 'Benchmark models in eager model.'} ) __lowercase : bool = field( default=A_ , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['tf'] ) lowercase__: Tuple = None if self.tpu: try: if self.tpu_name: lowercase__: List[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: lowercase__: Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: lowercase__: Optional[int] = None return tpu @cached_property def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' requires_backends(self , ['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) lowercase__: str = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' ) lowercase__: Any = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] , 'GPU' ) # disable GPU lowercase__: Dict = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' ) return strategy @property def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['tf'] ) return self._setup_tpu is not None @property def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' requires_backends(self , ['tf'] ) return self._setup_strategy @property def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' return self.n_gpu > 0
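# The constructor above translates legacy `no_*` flags into their positive
# counterparts before the dataclass init runs. A standalone sketch of that
# pattern (the helper name is illustrative, not the benchmark class itself):
def translate_deprecated_args(kwargs: dict, deprecated_args: list) -> dict:
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs


print(translate_deprecated_args({"no_cuda": True}, ["no_cuda"]))  # {'cuda': False}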
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all (optionally empty) contiguous subarrays."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
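# Kadane's algorithm runs in one pass: O(n) time, O(1) extra space. The flag
# only changes the convention for all-negative inputs:
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6           # [4, -1, 2, 1]
assert max_subarray_sum([-3, -2, -5]) == -2                              # best single element
assert max_subarray_sum([-3, -2, -5], allow_empty_subarrays=True) == 0  # empty subarray wins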
"""Nagel-Schreckenberg cellular-automaton traffic simulation."""
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed  # Place the cars
        i += randint(1, max_speed * 2) if random_frequency else frequency  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
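# An illustrative deterministic run of the functions above: 10 cells, a car
# every 3 cells at speed 1, probability 0.0 so no driver ever slows down.
highway = construct_highway(10, frequency=3, initial_speed=1)
history = simulate(highway, number_of_update=4, probability=0.0, max_speed=5)
for state in history:
    print(["." if cell == -1 else str(cell) for cell in state])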
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Any =PriorTransformer UpperCamelCase_ : List[str] ='''hidden_states''' @property def _A (self ): __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def _A (self ): return (4, 8) @property def _A (self ): return (4, 8) def _A (self ): __lowercase= { 'num_attention_heads': 2, 'attention_head_dim': 4, 'num_layers': 2, 'embedding_dim': 8, 'num_embeddings': 7, 'additional_embeddings': 4, } __lowercase= self.dummy_input return init_dict, inputs_dict def _A (self ): __lowercase, __lowercase= PriorTransformer.from_pretrained( 'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(lowerCAmelCase ) __lowercase= model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def _A (self ): __lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common() __lowercase= self.model_class(**lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['hidden_states', 'timestep'] self.assertListEqual(arg_names[:2] , lowerCAmelCase ) def _A (self ): __lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' ) __lowercase= model.to(lowerCAmelCase ) if hasattr(lowerCAmelCase , 'set_default_attn_processor' ): model.set_default_attn_processor() __lowercase= self.get_dummy_seed_input() with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] __lowercase= output[0, :5].flatten().cpu() print(lowerCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] ) self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) ) @slow class A ( unittest.TestCase ): def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= batch_size __lowercase= embedding_dim __lowercase= num_embeddings __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]], [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]], # fmt: on ] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' ) model.to(lowerCAmelCase ) __lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase ) with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] assert list(sample.shape ) == [1, 7_6_8] __lowercase= sample[0, :8].flatten().cpu() print(lowerCAmelCase ) __lowercase= torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: maximise value of ``n`` items within capacity ``w``."""
    # sort items by value-to-weight ratio, best first
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    # k = number of whole items that fit
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
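# Illustrative call: items of value [60, 100, 120] and weight [10, 20, 30]
# with capacity 50 -> take items 1 and 2 whole, plus 2/3 of item 3.
print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0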
def counting_sort(collection):
    """Stable counting sort of a collection of integers."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i the collection has
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of ``string`` via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
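# Counting sort runs in O(n + k) time and space, where k is the value range
# (coll_max - coll_min + 1). The backwards placement pass is what makes it
# stable, which matters when it is used as a subroutine of radix sort.
assert counting_sort([4, 1, 3, 1, 2]) == [1, 1, 2, 3, 4]
assert counting_sort_string("bca") == "abc"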
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, } UpperCamelCase__ = { '''facebook/bart-base''': 1_0_2_4, '''facebook/bart-large''': 1_0_2_4, '''facebook/bart-large-mnli''': 1_0_2_4, '''facebook/bart-large-cnn''': 1_0_2_4, '''facebook/bart-large-xsum''': 1_0_2_4, '''yjernite/bart_eli5''': 1_0_2_4, } @lru_cache() def a__ ( ) -> Dict: UpperCAmelCase__ : Any = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) UpperCAmelCase__ : Union[str, Any] = bs[:] UpperCAmelCase__ : Dict = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase__ ) cs.append(2**8 + n ) n += 1 UpperCAmelCase__ : int = [chr(lowercase__ ) for n in cs] return dict(zip(lowercase__ , lowercase__ ) ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: UpperCAmelCase__ : Any = set() UpperCAmelCase__ : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : List[str] = char return pairs class lowerCamelCase_ ( A_ ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self : List[Any] , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any]="replace" , _A : Dict="<s>" , _A : Tuple="</s>" , _A : Union[str, Any]="</s>" , _A : Any="<s>" , _A : Any="<unk>" , _A : Any="<pad>" , _A : int="<mask>" , _A : Tuple=False , **_A : str , ): '''simple docstring''' UpperCAmelCase__ : str = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token UpperCAmelCase__ : str = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token UpperCAmelCase__ : Any = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else 
sep_token UpperCAmelCase__ : List[str] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token UpperCAmelCase__ : Any = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token UpperCAmelCase__ : List[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase__ : int = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding='''utf-8''' ) as vocab_handle: UpperCAmelCase__ : int = json.load(_A ) UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()} UpperCAmelCase__ : Union[str, Any] = errors # how to handle errors in decoding UpperCAmelCase__ : Any = bytes_to_unicode() UpperCAmelCase__ : Tuple = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding='''utf-8''' ) as merges_handle: UpperCAmelCase__ : List[Any] = merges_handle.read().split('''\n''' )[1:-1] UpperCAmelCase__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges] UpperCAmelCase__ : Optional[int] = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCAmelCase__ : int = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowercase_ ( self : Dict ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowercase_ ( self : Dict , _A : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : int = tuple(_A ) UpperCAmelCase__ : List[Any] = get_pairs(_A ) if not pairs: return token while True: UpperCAmelCase__ : Any = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = bigram UpperCAmelCase__ : Dict = [] UpperCAmelCase__ : str = 0 while i < len(_A ): try: UpperCAmelCase__ : Any = word.index(_A , _A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : List[Any] = j if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Tuple = tuple(_A ) UpperCAmelCase__ : int = new_word if len(_A ) == 1: break else: UpperCAmelCase__ : str = get_pairs(_A ) UpperCAmelCase__ : Optional[int] = ''' '''.join(_A ) UpperCAmelCase__ : str = word return word def lowercase_ ( self : Optional[Any] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : str = [] for token in re.findall(self.pat , _A ): UpperCAmelCase__ : Union[str, Any] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(''' ''' ) ) return bpe_tokens def lowercase_ ( self : str , _A : Union[str, Any] ): '''simple docstring''' return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def lowercase_ ( self : str , _A : 
str ): '''simple docstring''' return self.decoder.get(_A ) def lowercase_ ( self : Union[str, Any] , _A : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = ''''''.join(_A ) UpperCAmelCase__ : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def lowercase_ ( self : str , _A : Optional[Any] , _A : str = None ): '''simple docstring''' if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase__ : Dict = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : Optional[Any] = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + '''\n''' ) UpperCAmelCase__ : int = 0 with open(_A , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) UpperCAmelCase__ : Tuple = token_index writer.write(''' '''.join(_A ) + '''\n''' ) index += 1 return vocab_file, merge_file def lowercase_ ( self : Tuple , _A : int , _A : List[Any] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : List[Any] = [self.cls_token_id] UpperCAmelCase__ : Optional[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase_ ( self : List[str] , _A : Tuple , _A : int = None , _A : List[Any] = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return [1] + ([0] * len(_A )) + [1] return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1] def lowercase_ ( self : int , _A : Dict , _A : Tuple = None ): '''simple docstring''' UpperCAmelCase__ : Dict = [self.sep_token_id] UpperCAmelCase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self : int , _A : Optional[int] , _A : Any=False , **_A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()): UpperCAmelCase__ : Any = ''' ''' + text return (text, kwargs)
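A short usage sketch of the byte-level BPE tokenizer above, via the public BartTokenizer class (this downloads the facebook/bart-base vocab and merges files on first run):

from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
encoded = tokenizer("Hello world")
print(encoded["input_ids"])
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))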
181
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_mask __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_input_mask: __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _A (self ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForMaskedLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForQuestionAnswering(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForTokenClassification(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_choices __lowercase= DistilBertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= config_and_inputs __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Any =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) UpperCamelCase_ : Optional[int] =( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : str =True UpperCamelCase_ : str =True UpperCamelCase_ : Union[str, Any] =True UpperCamelCase_ : Optional[int] =True def _A (self ): __lowercase= DistilBertModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase ) def _A (self ): 
__lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase ) @slow def _A (self ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= DistilBertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __lowercase= True __lowercase= model_class(config=lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) __lowercase= torch.jit.trace( lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase , os.path.join(lowerCAmelCase , 'traced_model.pt' ) ) __lowercase= torch.jit.load(os.path.join(lowerCAmelCase , 'traced_model.pt' ) , map_location=lowerCAmelCase ) loaded(inputs_dict['input_ids'].to(lowerCAmelCase ) , inputs_dict['attention_mask'].to(lowerCAmelCase ) ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= DistilBertModel.from_pretrained('distilbert-base-uncased' ) __lowercase= torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0] __lowercase= torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , lowerCAmelCase ) __lowercase= torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
295
0
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with the additive Pascal's-triangle recurrence on a single rolling row."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # compute the current row from the previous row, updating right to left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
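As a quick sanity check (a minimal sketch; math.comb requires Python 3.8+), the rolling-row DP above should agree with the standard library:

import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252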
229
def bfs(graph, source, sink, parent):
    """Breadth-first search in the residual graph; fills `parent` with the
    augmenting path and returns True if the sink is reachable."""
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant: repeatedly augment along BFS paths until none remain.
    Note that `graph` is mutated in place as the residual capacities are updated."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # find the bottleneck capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
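The capacity matrix above is the standard textbook (CLRS) instance, whose maximum flow is 23. Since ford_fulkerson mutates its input, a sanity check needs a fresh copy of the capacities:

capacities = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(capacities, 0, 5) == 23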
295
0
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _A ( A__ , A__=0.9_9_9 , A__="cosine" , ): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(A__ ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A__ ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" ) __lowercase = [] for i in range(lowercase__ ): __lowercase = i / num_diffusion_timesteps __lowercase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) ) return torch.tensor(lowercase__ , dtype=torch.floataa ) class lowercase_ (A_ , A_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [e.name for e in KarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE : int = 2 @register_to_config def __init__( self : Optional[int] ,lowercase__ : Optional[Any] = 1_0_0_0 ,lowercase__ : Any = 0.0_0_0_8_5 ,lowercase__ : str = 0.0_1_2 ,lowercase__ : Tuple = "linear" ,lowercase__ : Optional[Any] = None ,lowercase__ : Optional[Any] = "epsilon" ,lowercase__ : Dict = "linspace" ,lowercase__ : Dict = 0 ,): if trained_betas is not None: __lowercase = torch.tensor(lowercase__ ,dtype=torch.floataa ) elif beta_schedule == "linear": __lowercase = torch.linspace(lowercase__ ,lowercase__ ,lowercase__ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __lowercase = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,lowercase__ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __lowercase = betas_for_alpha_bar(lowercase__ ) else: raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" ) __lowercase = 1.0 - self.betas __lowercase = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ,lowercase__ : Optional[int]=None ): if schedule_timesteps is None: __lowercase = self.timesteps __lowercase = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: __lowercase = 1 if len(lowercase__ ) > 1 else 0 else: __lowercase = timestep.cpu().item() if torch.is_tensor(lowercase__ ) else timestep __lowercase = self._index_counter[timestep_int] return indices[pos].item() @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Any ,lowercase__ : List[str] ,): __lowercase = self.index_for_timestep(lowercase__ ) if self.state_in_first_order: __lowercase = self.sigmas[step_index] else: __lowercase = self.sigmas_interpol[step_index] __lowercase = sample / ((sigma**2 + 1) ** 0.5) return sample def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ,lowercase__ : Union[str, Any] = None ,lowercase__ : Dict = None ,): __lowercase = num_inference_steps __lowercase = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __lowercase = np.linspace(0 ,num_train_timesteps - 1 ,lowercase__ ,dtype=lowercase__ )[::-1].copy() elif self.config.timestep_spacing == "leading": __lowercase = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowercase = (np.arange(0 ,lowercase__ ) * step_ratio).round()[::-1].copy().astype(lowercase__ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __lowercase = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowercase = (np.arange(lowercase__ ,0 ,-step_ratio )).round().copy().astype(lowercase__ ) timesteps -= 1 else: raise ValueError( F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'." 
) __lowercase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __lowercase = torch.from_numpy(np.log(lowercase__ ) ).to(lowercase__ ) __lowercase = np.interp(lowercase__ ,np.arange(0 ,len(lowercase__ ) ) ,lowercase__ ) __lowercase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __lowercase = torch.from_numpy(lowercase__ ).to(device=lowercase__ ) # interpolate sigmas __lowercase = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp() __lowercase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) __lowercase = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(lowercase__ ).startswith('''mps''' ): # mps does not support float64 __lowercase = torch.from_numpy(lowercase__ ).to(lowercase__ ,dtype=torch.floataa ) else: __lowercase = torch.from_numpy(lowercase__ ).to(lowercase__ ) # interpolate timesteps __lowercase = self.sigma_to_t(lowercase__ ).to(lowercase__ ,dtype=timesteps.dtype ) __lowercase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten() __lowercase = torch.cat([timesteps[:1], interleaved_timesteps] ) __lowercase = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __lowercase = defaultdict(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ): # get log sigma __lowercase = sigma.log() # get distribution __lowercase = log_sigma - self.log_sigmas[:, None] # get sigmas range __lowercase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) __lowercase = low_idx + 1 __lowercase = self.log_sigmas[low_idx] __lowercase = self.log_sigmas[high_idx] # interpolate sigmas __lowercase = (low - log_sigma) / (low - high) __lowercase = w.clamp(0 ,1 ) # transform interpolation to time range __lowercase = (1 - w) * low_idx + w * high_idx __lowercase = t.view(sigma.shape ) return t @property def SCREAMING_SNAKE_CASE ( self : List[str] ): return self.sample is None def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple = True ,): __lowercase = self.index_for_timestep(lowercase__ ) # advance index counter by 1 __lowercase = timestep.cpu().item() if torch.is_tensor(lowercase__ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __lowercase = self.sigmas[step_index] __lowercase = self.sigmas_interpol[step_index + 1] __lowercase = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method __lowercase = self.sigmas[step_index - 1] __lowercase = self.sigmas_interpol[step_index] __lowercase = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __lowercase = 0 __lowercase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __lowercase = sigma_hat if self.state_in_first_order else sigma_interpol __lowercase = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __lowercase = sigma_hat if self.state_in_first_order else sigma_interpol __lowercase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __lowercase = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __lowercase = sigma_interpol - sigma_hat # store for 2nd order step __lowercase = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order __lowercase = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep __lowercase = sigma_next - sigma_hat __lowercase = self.sample __lowercase = None __lowercase = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : str ,lowercase__ : Optional[int] ,lowercase__ : str ,): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowercase = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(lowercase__ ): # mps does not support float64 __lowercase = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) __lowercase = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: __lowercase = self.timesteps.to(original_samples.device ) __lowercase = timesteps.to(original_samples.device ) __lowercase = [self.index_for_timestep(lowercase__ ,lowercase__ ) for t in timesteps] __lowercase = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __lowercase = sigma.unsqueeze(-1 ) __lowercase = original_samples + noise * sigma return noisy_samples def __len__( self : Any ): return self.config.num_train_timesteps
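A hedged usage sketch of the scheduler above (assuming it is diffusers' KDPM2DiscreteScheduler on a recent release; the import path and class name are not confirmed by this file):

import torch
from diffusers import KDPM2DiscreteScheduler  # assumed import path

scheduler = KDPM2DiscreteScheduler()  # defaults mirror the register_to_config signature above
scheduler.set_timesteps(num_inference_steps=8)
sample = torch.randn(1, 4, 8, 8)
# scale_model_input divides by (sigma**2 + 1) ** 0.5, as implemented above
scaled = scheduler.scale_model_input(sample, scheduler.timesteps[0])
print(scaled.shape)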
104
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt substring search: True iff `pattern` occurs in `text`."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """failure[k] = length of the longest proper prefix of pattern[: k + 1]
    that is also a suffix of it."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
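A minimal randomized cross-check sketch against Python's built-in substring search (a hypothetical harness, not part of the original tests):

import random

for _ in range(100):
    text = "".join(random.choices("ab", k=30))
    pattern = "".join(random.choices("ab", k=3))
    assert kmp(pattern, text) == (pattern in text)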
295
0
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Sum of the proper divisors of n (all divisors of n except n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            # perfect square: count the square root only once
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Project Euler 21: sum of all amicable numbers below n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
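A quick check on the classic amicable pair (220, 284), whose proper-divisor sums swap, and on the running total below 300:

assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
assert solution(300) == 220 + 284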
116
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
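A minimal usage sketch (assumes a transformers release that ships GPT-NeoX; the lazy module above resolves the real classes only on first attribute access):

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
print(config.hidden_size)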
295
0
import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib lowercase_ : str = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } lowercase_ : Any = logging.WARNING def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' _UpperCAmelCase = os.getenv("DATASETS_VERBOSITY" , lowercase__ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' return __name__.split("." )[0] def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' return logging.getLogger(_get_library_name() ) def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' _UpperCAmelCase = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' _UpperCAmelCase = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def __SCREAMING_SNAKE_CASE ( snake_case_ = None ): '''simple docstring''' if name is None: _UpperCAmelCase = _get_library_name() return logging.getLogger(lowercase__ ) def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' return _get_library_root_logger().getEffectiveLevel() def __SCREAMING_SNAKE_CASE ( snake_case_ ): '''simple docstring''' _get_library_root_logger().setLevel(lowercase__ ) def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' return set_verbosity(lowercase__ ) def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' return set_verbosity(lowercase__ ) def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' return set_verbosity(lowercase__ ) def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' return set_verbosity(lowercase__ ) def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' _UpperCAmelCase = False def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' _UpperCAmelCase = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class __lowerCAmelCase : def __init__( self : str , *snake_case__ : Dict , **snake_case__ : List[str] ): # pylint: disable=unused-argument """simple docstring""" _UpperCAmelCase = args[0] if args else None def __iter__( self : List[Any] ): """simple docstring""" return iter(self._iterator ) def __getattr__( self : Tuple , snake_case__ : List[Any] ): """simple docstring""" def empty_fn(*snake_case__ : int , **snake_case__ : List[Any] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : List[str] ): """simple docstring""" return self def __exit__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] ): """simple docstring""" return lowercase_ : Any = True class __lowerCAmelCase : def __call__( self : List[Any] , *snake_case__ : Dict , snake_case__ : Dict=False , **snake_case__ : int ): """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*snake_case__ , **snake_case__ ) else: return EmptyTqdm(*snake_case__ , **snake_case__ ) def UpperCamelCase ( self : Any , *snake_case__ : List[str] , **snake_case__ : int ): """simple docstring""" _UpperCAmelCase = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*snake_case__ , 
**snake_case__ ) def UpperCamelCase ( self : str ): """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() lowercase_ : List[Any] = _tqdm_cls() def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' global _tqdm_active _UpperCAmelCase = True def __SCREAMING_SNAKE_CASE ( ): '''simple docstring''' global _tqdm_active _UpperCAmelCase = False
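A usage sketch of the helpers above (assuming they are exposed as datasets.logging, as in the datasets library; the exact import paths for the progress-bar toggles vary by version):

import datasets

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)
logger.info("verbosity is now INFO")
datasets.logging.disable_progress_bar()  # flips the module-global _tqdm_active flag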
133
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A ( enum.Enum ): UpperCamelCase_ : Optional[int] =0 UpperCamelCase_ : Tuple =1 UpperCamelCase_ : Optional[int] =2 @add_end_docstrings(A_ ) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__(self , *lowerCAmelCase , **lowerCAmelCase ): super().__init__(*lowerCAmelCase , **lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowercase= None if self.model.config.prefix is not None: __lowercase= self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowercase= self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params ) __lowercase= {**self._preprocess_params, **preprocess_params} __lowercase= {**self._forward_params, **forward_params} def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ): __lowercase= {} if prefix is not None: __lowercase= prefix if prefix: __lowercase= self.tokenizer( lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected' ' [None, \'hole\']' ) __lowercase= handle_long_generation preprocess_params.update(lowerCAmelCase ) __lowercase= generate_kwargs __lowercase= {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.TENSORS if return_type is not None: __lowercase= return_type if clean_up_tokenization_spaces is not None: __lowercase= clean_up_tokenization_spaces if stop_sequence is not None: __lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) if len(lowerCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowercase= stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _A (self , *lowerCAmelCase , **lowerCAmelCase ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase ) def __call__(self , lowerCAmelCase , **lowerCAmelCase ): return super().__call__(lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ): __lowercase= self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prompt_text if handle_long_generation == "hole": __lowercase= inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowercase= generate_kwargs['max_new_tokens'] else: __lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowercase= self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowercase= inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowercase= inputs['attention_mask'][:, -keep_length:] return inputs def _A (self , lowerCAmelCase , **lowerCAmelCase ): __lowercase= model_inputs['input_ids'] __lowercase= model_inputs.get('attention_mask' , lowerCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: __lowercase= None __lowercase= None __lowercase= 1 else: __lowercase= input_ids.shape[0] __lowercase= model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowercase= generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowercase= 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowercase= 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase ) __lowercase= generated_sequence.shape[0] if self.framework == "pt": __lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ): __lowercase= model_outputs['generated_sequence'][0] __lowercase= model_outputs['input_ids'] __lowercase= model_outputs['prompt_text'] __lowercase= generated_sequence.numpy().tolist() __lowercase= [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowercase= {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowercase= self.tokenizer.decode( lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowercase= 0 else: __lowercase= len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) ) if return_type == ReturnType.FULL_TEXT: __lowercase= prompt_text + text[prompt_length:] else: __lowercase= text[prompt_length:] __lowercase= {'generated_text': all_text} records.append(lowerCAmelCase ) return records
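A short usage sketch of this text-generation pipeline via the high-level factory (downloads GPT-2 on first run; the model choice is illustrative):

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator("Once upon a time", max_new_tokens=10, num_return_sequences=1)
print(outputs[0]["generated_text"])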
295
0
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
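A forward-pass sketch (assuming this is diffusers' VQModel with its default config; the import path and shapes are illustrative):

import torch
from diffusers import VQModel  # assumed import path

model = VQModel()  # defaults match the signature above
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    reconstruction = model(x).sample
print(reconstruction.shape)  # expected: torch.Size([1, 3, 32, 32])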
134
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class A ( A_ ): UpperCamelCase_ : torch.FloatTensor class A ( A_ , A_ ): @register_to_config def __init__(self , lowerCAmelCase = 3 , lowerCAmelCase = 3 , lowerCAmelCase = ("DownEncoderBlock2D",) , lowerCAmelCase = ("UpDecoderBlock2D",) , lowerCAmelCase = (6_4,) , lowerCAmelCase = 1 , lowerCAmelCase = "silu" , lowerCAmelCase = 3 , lowerCAmelCase = 3_2 , lowerCAmelCase = 2_5_6 , lowerCAmelCase = 3_2 , lowerCAmelCase = None , lowerCAmelCase = 0.1_82_15 , lowerCAmelCase = "group" , ): super().__init__() # pass init params to Encoder __lowercase= Encoder( in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , down_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , double_z=lowerCAmelCase , ) __lowercase= vq_embed_dim if vq_embed_dim is not None else latent_channels __lowercase= nn.Convad(lowerCAmelCase , lowerCAmelCase , 1 ) __lowercase= VectorQuantizer(lowerCAmelCase , lowerCAmelCase , beta=0.25 , remap=lowerCAmelCase , sane_index_shape=lowerCAmelCase ) __lowercase= nn.Convad(lowerCAmelCase , lowerCAmelCase , 1 ) # pass init params to Decoder __lowercase= Decoder( in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , up_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , norm_type=lowerCAmelCase , ) @apply_forward_hook def _A (self , lowerCAmelCase , lowerCAmelCase = True ): __lowercase= self.encoder(lowerCAmelCase ) __lowercase= self.quant_conv(lowerCAmelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCAmelCase ) @apply_forward_hook def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ): # also go through quantization layer if not force_not_quantize: __lowercase, __lowercase, __lowercase= self.quantize(lowerCAmelCase ) else: __lowercase= h __lowercase= self.post_quant_conv(lowerCAmelCase ) __lowercase= self.decoder(lowerCAmelCase , quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = True ): __lowercase= sample __lowercase= self.encode(lowerCAmelCase ).latents __lowercase= self.decode(lowerCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase )
295
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class SCREAMING_SNAKE_CASE : """simple docstring""" lowercase__ = BlenderbotConfig lowercase__ = {} lowercase__ = '''gelu''' def __init__( self : Union[str, Any] ,lowercase_ : Optional[int] ,lowercase_ : int=1_3 ,lowercase_ : Tuple=7 ,lowercase_ : Tuple=True ,lowercase_ : Optional[int]=False ,lowercase_ : List[Any]=9_9 ,lowercase_ : List[str]=3_2 ,lowercase_ : Dict=2 ,lowercase_ : Optional[Any]=4 ,lowercase_ : int=3_7 ,lowercase_ : int=0.1 ,lowercase_ : Union[str, Any]=0.1 ,lowercase_ : Optional[Any]=2_0 ,lowercase_ : Optional[Any]=2 ,lowercase_ : Tuple=1 ,lowercase_ : str=0 ,): lowerCAmelCase__ : List[Any] = parent lowerCAmelCase__ : List[Any] = batch_size lowerCAmelCase__ : Optional[Any] = seq_length lowerCAmelCase__ : int = is_training lowerCAmelCase__ : List[str] = use_labels lowerCAmelCase__ : Tuple = vocab_size lowerCAmelCase__ : Dict = hidden_size lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : Union[str, Any] = num_attention_heads lowerCAmelCase__ : str = intermediate_size lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : str = attention_probs_dropout_prob lowerCAmelCase__ : int = max_position_embeddings lowerCAmelCase__ : Tuple = eos_token_id lowerCAmelCase__ : Dict = pad_token_id lowerCAmelCase__ : List[Any] = bos_token_id def __lowerCAmelCase ( self : Any ): lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) lowerCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) lowerCAmelCase__ : Dict = tf.concat([input_ids, eos_tensor] ,axis=1 ) lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCAmelCase__ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) lowerCAmelCase__ : List[Any] = prepare_blenderbot_inputs_dict(lowercase_ ,lowercase_ ,lowercase_ ) return config, inputs_dict def __lowerCAmelCase ( self : int ,lowercase_ : Optional[Any] ,lowercase_ : Dict ): lowerCAmelCase__ : Optional[int] = TFBlenderbotModel(config=lowercase_ ).get_decoder() lowerCAmelCase__ : Union[str, Any] = inputs_dict['''input_ids'''] lowerCAmelCase__ : Optional[Any] = input_ids[:1, :] lowerCAmelCase__ : int = inputs_dict['''attention_mask'''][:1, :] lowerCAmelCase__ : int = inputs_dict['''head_mask'''] lowerCAmelCase__ : int = 1 # 
first forward pass lowerCAmelCase__ : Tuple = model(lowercase_ ,attention_mask=lowercase_ ,head_mask=lowercase_ ,use_cache=lowercase_ ) lowerCAmelCase__ ,lowerCAmelCase__ : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase__ : Any = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowerCAmelCase__ : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and lowerCAmelCase__ : int = tf.concat([input_ids, next_tokens] ,axis=-1 ) lowerCAmelCase__ : List[str] = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) lowerCAmelCase__ : Optional[Any] = model(lowercase_ ,attention_mask=lowercase_ )[0] lowerCAmelCase__ : Tuple = model(lowercase_ ,attention_mask=lowercase_ ,past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice lowerCAmelCase__ : Optional[Any] = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) lowerCAmelCase__ : str = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase__ : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_ ,lowercase_ ,rtol=1E-3 ) def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_=None , A_=None , A_=None , A_=None , A_=None , ): if attention_mask is None: lowerCAmelCase__ : Any = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCAmelCase__ : Any = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCAmelCase__ : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase__ : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class SCREAMING_SNAKE_CASE ( A_ , A_ , unittest.TestCase ): """simple docstring""" lowercase__ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () lowercase__ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () lowercase__ = ( { '''conversational''': TFBlenderbotForConditionalGeneration, '''feature-extraction''': TFBlenderbotModel, '''summarization''': TFBlenderbotForConditionalGeneration, '''text2text-generation''': TFBlenderbotForConditionalGeneration, '''translation''': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) lowercase__ = True lowercase__ = False lowercase__ = False def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : List[str] = TFBlenderbotModelTester(self ) lowerCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=lowercase_ ) def __lowerCAmelCase ( self : Optional[int] ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Dict ): lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) @require_tokenizers @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): 
"""simple docstring""" lowercase__ = ['''My friends are cool but they eat too many carbs.'''] lowercase__ = '''facebook/blenderbot-400M-distill''' @cached_property def __lowerCAmelCase ( self : str ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def __lowerCAmelCase ( self : int ): lowerCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def __lowerCAmelCase ( self : List[str] ): lowerCAmelCase__ : Union[str, Any] = self.tokenizer(self.src_text ,return_tensors='''tf''' ) lowerCAmelCase__ : Any = self.model.generate( model_inputs.input_ids ,) lowerCAmelCase__ : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=lowercase_ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class a_ ( enum.Enum ): """simple docstring""" __UpperCAmelCase = 0 __UpperCAmelCase = 1 __UpperCAmelCase = 2 @add_end_docstrings(A_ ) class a_ ( A_ ): """simple docstring""" __UpperCAmelCase = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self : str ,*snake_case : Tuple ,**snake_case : Any ): super().__init__(*snake_case ,**snake_case ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. SCREAMING_SNAKE_CASE =None if self.model.config.prefix is not None: SCREAMING_SNAKE_CASE =self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. SCREAMING_SNAKE_CASE =self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self._sanitize_parameters(prefix=snake_case ,**self._forward_params ) SCREAMING_SNAKE_CASE ={**self._preprocess_params, **preprocess_params} SCREAMING_SNAKE_CASE ={**self._forward_params, **forward_params} def _lowerCAmelCase ( self : List[str] ,snake_case : Union[str, Any]=None ,snake_case : Optional[Any]=None ,snake_case : Optional[Any]=None ,snake_case : str=None ,snake_case : Optional[Any]=None ,snake_case : Tuple=None ,snake_case : Tuple=None ,snake_case : Tuple=None ,**snake_case : List[str] ,): SCREAMING_SNAKE_CASE ={} if prefix is not None: SCREAMING_SNAKE_CASE =prefix if prefix: SCREAMING_SNAKE_CASE =self.tokenizer( snake_case ,padding=snake_case ,add_special_tokens=snake_case ,return_tensors=self.framework ) SCREAMING_SNAKE_CASE =prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected' ' [None, \'hole\']' ) SCREAMING_SNAKE_CASE =handle_long_generation preprocess_params.update(snake_case ) SCREAMING_SNAKE_CASE =generate_kwargs SCREAMING_SNAKE_CASE ={} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) SCREAMING_SNAKE_CASE =ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) SCREAMING_SNAKE_CASE =ReturnType.TENSORS if return_type is not None: SCREAMING_SNAKE_CASE =return_type if clean_up_tokenization_spaces is not None: SCREAMING_SNAKE_CASE =clean_up_tokenization_spaces if stop_sequence is not None: SCREAMING_SNAKE_CASE =self.tokenizer.encode(snake_case ,add_special_tokens=snake_case ) if len(snake_case ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) SCREAMING_SNAKE_CASE =stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _lowerCAmelCase ( self : Optional[int] ,*snake_case : Optional[int] ,**snake_case : Union[str, Any] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*snake_case ,**snake_case ) def __call__( self : List[Any] ,snake_case : int ,**snake_case : Any ): return super().__call__(snake_case ,**snake_case ) def _lowerCAmelCase ( self : Dict ,snake_case : Tuple ,snake_case : Optional[Any]="" ,snake_case : Tuple=None ,**snake_case : int ): SCREAMING_SNAKE_CASE =self.tokenizer( prefix + prompt_text ,padding=snake_case ,add_special_tokens=snake_case ,return_tensors=self.framework ) SCREAMING_SNAKE_CASE =prompt_text if handle_long_generation == "hole": SCREAMING_SNAKE_CASE =inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: SCREAMING_SNAKE_CASE =generate_kwargs['max_new_tokens'] else: SCREAMING_SNAKE_CASE =generate_kwargs.get('max_length' ,self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: SCREAMING_SNAKE_CASE =self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) SCREAMING_SNAKE_CASE =inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: SCREAMING_SNAKE_CASE =inputs['attention_mask'][:, -keep_length:] return inputs def _lowerCAmelCase ( self : Tuple ,snake_case : Optional[Any] ,**snake_case : Union[str, Any] ): SCREAMING_SNAKE_CASE =model_inputs['input_ids'] SCREAMING_SNAKE_CASE =model_inputs.get('attention_mask' ,snake_case ) # Allow empty prompts if input_ids.shape[1] == 0: SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =None SCREAMING_SNAKE_CASE =1 else: SCREAMING_SNAKE_CASE =input_ids.shape[0] SCREAMING_SNAKE_CASE =model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
SCREAMING_SNAKE_CASE =generate_kwargs.pop('prefix_length' ,0 ) if prefix_length > 0: SCREAMING_SNAKE_CASE ='max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: SCREAMING_SNAKE_CASE =generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length SCREAMING_SNAKE_CASE ='min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL SCREAMING_SNAKE_CASE =self.model.generate(input_ids=snake_case ,attention_mask=snake_case ,**snake_case ) SCREAMING_SNAKE_CASE =generated_sequence.shape[0] if self.framework == "pt": SCREAMING_SNAKE_CASE =generated_sequence.reshape(snake_case ,out_b // in_b ,*generated_sequence.shape[1:] ) elif self.framework == "tf": SCREAMING_SNAKE_CASE =tf.reshape(snake_case ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _lowerCAmelCase ( self : Optional[Any] ,snake_case : str ,snake_case : Union[str, Any]=ReturnType.FULL_TEXT ,snake_case : Optional[Any]=True ): SCREAMING_SNAKE_CASE =model_outputs['generated_sequence'][0] SCREAMING_SNAKE_CASE =model_outputs['input_ids'] SCREAMING_SNAKE_CASE =model_outputs['prompt_text'] SCREAMING_SNAKE_CASE =generated_sequence.numpy().tolist() SCREAMING_SNAKE_CASE =[] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: SCREAMING_SNAKE_CASE ={'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text SCREAMING_SNAKE_CASE =self.tokenizer.decode( snake_case ,skip_special_tokens=snake_case ,clean_up_tokenization_spaces=snake_case ,) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: SCREAMING_SNAKE_CASE =0 else: SCREAMING_SNAKE_CASE =len( self.tokenizer.decode( input_ids[0] ,skip_special_tokens=snake_case ,clean_up_tokenization_spaces=snake_case ,) ) if return_type == ReturnType.FULL_TEXT: SCREAMING_SNAKE_CASE =prompt_text + text[prompt_length:] else: SCREAMING_SNAKE_CASE =text[prompt_length:] SCREAMING_SNAKE_CASE ={'generated_text': all_text} records.append(snake_case ) return records
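# A minimal sketch of driving the text-generation pipeline above through the
# high-level `pipeline` factory; the gpt2 checkpoint and the prompt are
# illustrative, but `max_new_tokens` and `return_full_text` are exactly the
# knobs handled by the `_sanitize_parameters` method shown above.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
print(outputs[0]["generated_text"])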
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    """Return True only if every custom extension file exists under the given path."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort the first n elements of collection in place, recursively."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Push collection[index - 1] rightward until the adjacent pair is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
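# Quick non-interactive check of the recursive insertion sort above.
data = [5, 3, 8, 1, 2]
rec_insertion_sort(data, len(data))
assert data == [1, 2, 3, 5, 8]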
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig __snake_case = logging.get_logger(__name__) __snake_case = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class lowercase ( A_ ): """simple docstring""" _a = '''dpt''' def __init__( self , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=384 , UpperCamelCase_=16 , UpperCamelCase_=3 , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=[2, 5, 8, 11] , UpperCamelCase_="project" , UpperCamelCase_=[4, 2, 1, 0.5] , UpperCamelCase_=[96, 192, 384, 768] , UpperCamelCase_=256 , UpperCamelCase_=-1 , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=0.4 , UpperCamelCase_=255 , UpperCamelCase_=0.1 , UpperCamelCase_=[1, 1024, 24, 24] , UpperCamelCase_=[0, 1] , UpperCamelCase_=None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCamelCase__ :Any = hidden_size UpperCamelCase__ :Optional[int] = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('''Initializing the config with a `BiT` backbone.''' ) UpperCamelCase__ :int = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, } UpperCamelCase__ :str = BitConfig(**UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): logger.info('''Initializing the config with a `BiT` backbone.''' ) UpperCamelCase__ :Dict = BitConfig(**UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase__ :Tuple = backbone_config else: raise ValueError( F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) UpperCamelCase__ :Dict = backbone_featmap_shape UpperCamelCase__ :Union[str, Any] = neck_ignore_stages if readout_type != "project": raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' ) else: UpperCamelCase__ :Optional[Any] = None UpperCamelCase__ :List[Any] = None UpperCamelCase__ :Union[str, Any] = [] UpperCamelCase__ :List[str] = num_hidden_layers UpperCamelCase__ :Dict = num_attention_heads UpperCamelCase__ :Optional[Any] = intermediate_size UpperCamelCase__ :Optional[Any] = hidden_act UpperCamelCase__ :Optional[Any] = hidden_dropout_prob UpperCamelCase__ :Optional[Any] = attention_probs_dropout_prob UpperCamelCase__ :Optional[Any] = initializer_range UpperCamelCase__ :Optional[int] = layer_norm_eps UpperCamelCase__ :str = image_size UpperCamelCase__ :Tuple = patch_size UpperCamelCase__ :List[str] = num_channels UpperCamelCase__ :List[str] = qkv_bias UpperCamelCase__ :str = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' ) UpperCamelCase__ :str = readout_type UpperCamelCase__ :Any = reassemble_factors UpperCamelCase__ :List[str] = neck_hidden_sizes UpperCamelCase__ :Union[str, Any] = fusion_hidden_size UpperCamelCase__ :Tuple = head_in_index UpperCamelCase__ :str = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) UpperCamelCase__ :Union[str, Any] = 
use_auxiliary_head UpperCamelCase__ :Union[str, Any] = auxiliary_loss_weight UpperCamelCase__ :Tuple = semantic_loss_ignore_index UpperCamelCase__ :int = semantic_classifier_dropout def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCamelCase__ :Union[str, Any] = self.backbone_config.to_dict() UpperCamelCase__ :List[Any] = self.__class__.model_type return output
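# A minimal sketch of instantiating the configuration above; the explicit
# values are the documented defaults from the signature, so the call is
# equivalent to DPTConfig() with no arguments. DPTModel here builds a
# randomly initialized (non-hybrid) backbone, no weights are downloaded.
from transformers import DPTConfig, DPTModel

config = DPTConfig(hidden_size=768, num_hidden_layers=12, image_size=384, patch_size=16)
model = DPTModel(config)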
def split(string: str, separator: str = " ") -> list:
    """Split `string` into the substrings delimited by `separator`."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
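# Worth noting: unlike str.split, the implementation above drops a trailing
# empty field, because the final elif only fires when the last char is not a
# separator.
print(split("apple#banana#cherry#orange", separator="#"))  # ['apple', 'banana', 'cherry', 'orange']
print(split("a#b#", separator="#"))  # ['a', 'b']  (str.split would give ['a', 'b', ''])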
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _UpperCAmelCase = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase ( A_ ): '''simple docstring''' lowerCamelCase_ = ['''pixel_values'''] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 2_5_5 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ): """simple docstring""" super().__init__(**lowercase ) A_ : Union[str, Any] = size if size is not None else {'shortest_edge': 2_2_4} A_ : List[Any] = get_size_dict(lowercase , default_to_square=lowercase ) A_ : Tuple = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} A_ : Optional[int] = get_size_dict(lowercase , default_to_square=lowercase , param_name='crop_size' ) A_ : List[str] = do_resize A_ : List[str] = size A_ : Dict = resample A_ : List[Any] = do_center_crop A_ : Tuple = crop_size A_ : Optional[int] = do_rescale A_ : List[str] = rescale_factor A_ : List[str] = do_normalize A_ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Optional[int] = do_convert_rgb def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ): """simple docstring""" A_ : str = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) A_ : List[Any] = get_resize_output_image_size(lowercase , size=size['shortest_edge'] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ): """simple docstring""" A_ : Dict = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ): """simple docstring""" return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): """simple docstring""" return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): """simple docstring""" A_ : Any = do_resize if do_resize is not None else self.do_resize A_ : str = size if size is not None else self.size A_ : Optional[int] = get_size_dict(lowercase , param_name='size' , default_to_square=lowercase ) A_ : List[str] = resample if resample is not None else self.resample A_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Optional[int] = crop_size if crop_size is not None else self.crop_size A_ : Tuple = get_size_dict(lowercase , param_name='crop_size' , default_to_square=lowercase ) A_ : Dict = do_rescale if do_rescale is not None else self.do_rescale A_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : str = do_normalize if do_normalize is not None else self.do_normalize A_ : List[Any] = image_mean if image_mean is not None else self.image_mean A_ : Tuple = image_std if image_std is not None else self.image_std A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : Union[str, Any] = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : Optional[Any] = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A_ : str = [to_numpy_array(lowercase ) for image in images] if do_resize: A_ : Dict = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: A_ : Optional[Any] = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: A_ : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: A_ : Optional[Any] = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] A_ : Dict = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A_ : List[str] = {'pixel_values': images} return BatchFeature(data=lowercase , tensor_type=lowercase )
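# The class above mirrors transformers' public CLIPImageProcessor (same
# OPENAI_CLIP mean/std, resize -> center-crop -> rescale -> normalize order);
# a minimal sketch of the same preprocessing using that public counterpart on
# a dummy image.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
processor = CLIPImageProcessor()
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the default size/crop settings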
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict: '''simple docstring''' return field(default_factory=lambda: default , metadata=lowercase__ ) @dataclass class A : UpperCamelCase_ : str =field( metadata={'''help''': '''The csv file to plot.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={ '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.''' } , ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , ) UpperCamelCase_ : Optional[List[str]] =list_field( default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: int(lowercase__ ) return True except ValueError: return False def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: float(lowercase__ ) return True except ValueError: return False class A : def __init__(self , lowerCAmelCase ): __lowercase= args __lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='' ) as csv_file: __lowercase= csv.DictReader(lowerCAmelCase ) for row in reader: __lowercase= row['model'] self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) ) self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) ) if can_convert_to_int(row['result'] ): # value is not None __lowercase= int(row['result'] ) elif can_convert_to_float(row['result'] ): # value is not None __lowercase= float(row['result'] ) def _A (self ): __lowercase, __lowercase= plt.subplots() __lowercase= 'Time usage' if self.args.is_time else 'Memory usage' __lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('log' ) ax.set_yscale('log' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): __lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) ) __lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) ) __lowercase= self.result_dict[model_name]['result'] ((__lowercase), (__lowercase))= ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) __lowercase= ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: __lowercase= np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , ) else: __lowercase= np.asarray( [results[(inner_loop_value, x)] for x in 
x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((__lowercase), (__lowercase))= ( ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz') ) __lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )] plt.scatter( lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' ) plt.plot(lowerCAmelCase , lowerCAmelCase , '--' ) title_str += f' {label_model_name} vs.' __lowercase= title_str[:-4] __lowercase= 'Time in s' if self.args.is_time else 'Memory in MB' # plot plt.title(lowerCAmelCase ) plt.xlabel(lowerCAmelCase ) plt.ylabel(lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def _lowerCamelCase( ) -> str: '''simple docstring''' __lowercase= HfArgumentParser(lowercase__ ) __lowercase= parser.parse_args_into_dataclasses()[0] __lowercase= Plot(args=lowercase__ ) plot.plot() if __name__ == "__main__": main()
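# The CSV layout the plotter above expects: a header row plus one row per
# measurement, matching the DictReader field accesses; the numbers below are
# purely illustrative.
csv_text = """model,batch_size,sequence_length,result
gpt2,8,128,0.0421
gpt2,8,256,0.0834
"""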
"""Logistic Regression from scratch."""

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
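# For reference, the update logistic_reg implements is plain batch gradient
# descent on the mean cross-entropy loss (m = number of samples):
#   h = sigmoid(X @ theta)
#   J(theta) = -(1/m) * sum(y * log(h) + (1 - y) * log(1 - h))
#   grad = (1/m) * X.T @ (h - y)
#   theta <- theta - alpha * grad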
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class A ( A_ ): UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int =DPRContextEncoderTokenizer class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer lowerCAmelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(A_ ) class A : def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) elif titles is None or texts is None: __lowercase= titles if texts is None else texts return super().__call__( lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles] __lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts] __lowercase= len(lowerCAmelCase ) __lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.' 
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
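# A minimal sketch of the reader-tokenizer call documented in the docstring
# above; the checkpoint, question, and passage are illustrative.
from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    padding=True,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # (n_passages, sequence_length)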
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever _A : str = logging.getLogger(__name__) class _lowercase ( A_ ): '''simple docstring''' def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=None ) -> Dict: super().__init__( SCREAMING_SNAKE_CASE__ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE__ , generator_tokenizer=SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , init_retrieval=SCREAMING_SNAKE_CASE__ , ) __lowerCAmelCase = None def a ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> str: logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually __lowerCAmelCase = self._infer_socket_ifname() # avoid clash with the NCCL port __lowerCAmelCase = str(distributed_port + 1 ) __lowerCAmelCase = dist.new_group(ranks=SCREAMING_SNAKE_CASE__ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def a ( self : int ) -> Tuple: return dist.get_rank(group=self.process_group ) == 0 def a ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=torch.floataa ) -> Dict: __lowerCAmelCase = torch.empty(SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ ) dist.scatter(SCREAMING_SNAKE_CASE__ , src=0 , scatter_list=SCREAMING_SNAKE_CASE__ , group=self.process_group ) return target_tensor def a ( self : Tuple ) -> Tuple: __lowerCAmelCase = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __lowerCAmelCase = next((addr for addr in addrs if addr.startswith("""e""" )) , SCREAMING_SNAKE_CASE__ ) return ifname def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: # single GPU training if not dist.is_initialized(): __lowerCAmelCase , __lowerCAmelCase = self._main_retrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE__ ) # distributed training __lowerCAmelCase = dist.get_world_size(group=self.process_group ) # gather logic __lowerCAmelCase = None if self._is_main(): __lowerCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(SCREAMING_SNAKE_CASE__ )] dist.gather(torch.tensor(SCREAMING_SNAKE_CASE__ ) , dst=0 , gather_list=SCREAMING_SNAKE_CASE__ , group=self.process_group ) # scatter logic __lowerCAmelCase = question_hidden_states.shape[0] __lowerCAmelCase = [] __lowerCAmelCase = [] if self._is_main(): assert len(SCREAMING_SNAKE_CASE__ ) == world_size __lowerCAmelCase , __lowerCAmelCase = self._main_retrieve(torch.cat(SCREAMING_SNAKE_CASE__ ).numpy() , SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase , __lowerCAmelCase = torch.tensor(SCREAMING_SNAKE_CASE__ ), torch.tensor(SCREAMING_SNAKE_CASE__ ) 
__lowerCAmelCase = self._chunk_tensor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = self._chunk_tensor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = self._scattered(SCREAMING_SNAKE_CASE__ , [n_queries, n_docs] , target_type=torch.intaa ) __lowerCAmelCase = self._scattered(SCREAMING_SNAKE_CASE__ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(SCREAMING_SNAKE_CASE__ )
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A ( nn.Module ): def __init__(self ): super().__init__() __lowercase= nn.Linear(3 , 4 ) __lowercase= nn.BatchNormad(4 ) __lowercase= nn.Linear(4 , 5 ) def _A (self , lowerCAmelCase ): return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) ) class A ( A_ ): def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): return (args[0] + 1,) + args[1:], kwargs class A ( A_ ): def _A (self , lowerCAmelCase , lowerCAmelCase ): return output + 1 class A ( unittest.TestCase ): def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(test_model._hf_hook , lowerCAmelCase ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase ) self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(x + 1 ) __lowercase= test_model(x + 2 ) __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PostForwardHook() 
        add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
        __lowercase= test_model(lowerCAmelCase )
        self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) )

        # You need to use the sequential hook to chain two or more hooks
        __lowercase= SequentialHook(PostForwardHook() , PostForwardHook() )
        add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
        __lowercase= test_model(lowerCAmelCase )
        assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 )

    def _A (self ):
        __lowercase= ModelForTest()
        __lowercase= torch.randn(2 , 3 )
        __lowercase= test_model(lowerCAmelCase )

        __lowercase= PostForwardHook()
        add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
        __lowercase= test_model(lowerCAmelCase )
        self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) )
        self.assertTrue(outputa.requires_grad )

        __lowercase= True
        __lowercase= test_model(lowerCAmelCase )
        self.assertFalse(outputa.requires_grad )

    @require_multi_gpu
    def _A (self ):
        __lowercase= ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )

        # This will move each submodule on different devices
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )

        self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
        self.assertEqual(model.lineara.weight.device , torch.device(1 ) )

        # We can still make a forward pass. The input does not need to be on any particular device
        __lowercase= torch.randn(2 , 3 )
        __lowercase= model(lowerCAmelCase )
        self.assertEqual(output.device , torch.device(1 ) )

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) )
        __lowercase= torch.randn(2 , 3 ).to(0 )
        __lowercase= model(lowerCAmelCase )
        self.assertEqual(output.device , torch.device(0 ) )

    def _A (self ):
        __lowercase= ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )

        # This will move each submodule on different devices
        __lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
        add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )

        # Buffers are not included in the offload by default, so are on the execution device
        __lowercase= torch.device(hook_kwargs['execution_device'] )
        self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )

        __lowercase= torch.randn(2 , 3 )
        __lowercase= model(lowerCAmelCase )
        self.assertEqual(output.device , lowerCAmelCase )

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )

        # Now test with buffers included in the offload
        __lowercase= {
            'execution_device': 0 if torch.cuda.is_available() else 'cpu',
            'offload': True,
            'offload_buffers': True,
        }
        add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )

        __lowercase= torch.randn(2 , 3 )
        __lowercase= model(lowerCAmelCase )
        self.assertEqual(output.device , lowerCAmelCase )

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )

    def _A (self ):
        __lowercase= ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )

        # This will move each submodule on different devices
        __lowercase= 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )

        # Buffers are not included in the offload by default, so are on the execution device
        __lowercase= torch.device(lowerCAmelCase )
        self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )

        __lowercase= torch.randn(2 , 3 )
        __lowercase= model(lowerCAmelCase )
        self.assertEqual(output.device , lowerCAmelCase )

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(lowerCAmelCase )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )

        # Now test with buffers included in the offload
        attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )

        __lowercase= torch.randn(2 , 3 )
        __lowercase= model(lowerCAmelCase )
        self.assertEqual(output.device , lowerCAmelCase )

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(lowerCAmelCase )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )

    def _A (self ):
        __lowercase= ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )

        # This will move each submodule on different devices
        __lowercase= 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(
            lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )

        # Buffers are not included in the offload by default, so are on the execution device
        __lowercase= torch.device(lowerCAmelCase )
        self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )

        __lowercase= torch.randn(2 , 3 )
        __lowercase= model(lowerCAmelCase )
        self.assertEqual(output.device , lowerCAmelCase )

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(lowerCAmelCase )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )

        # Now test with buffers included in the offload
        attach_align_device_hook(
            lowerCAmelCase ,
            execution_device=lowerCAmelCase ,
            offload=lowerCAmelCase ,
            weights_map=model.state_dict() ,
            offload_buffers=lowerCAmelCase ,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )

        __lowercase= torch.randn(2 , 3 )
        __lowercase= model(lowerCAmelCase )
        self.assertEqual(output.device , lowerCAmelCase )

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(lowerCAmelCase )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
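The offload pattern exercised by these tests maps directly onto user code. A minimal sketch, assuming a recent accelerate release (the toy nn.Sequential model is hypothetical; any nn.Module works the same way):

import torch
from torch import nn

from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules

# Hypothetical toy model standing in for a real network.
model = nn.Sequential(nn.Linear(3, 4), nn.BatchNorm1d(4), nn.Linear(4, 5))

# Keep parameters offloaded (shown as "meta" on the module) and stream them to
# the execution device only for the duration of each forward pass.
execution_device = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(model, execution_device=execution_device, offload=True)

output = model(torch.randn(2, 3))  # output lands on `execution_device`

# Detaching the hooks loads the real weights back onto CPU.
remove_hook_from_submodules(model)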
295
0
'''simple docstring'''
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """simple docstring"""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
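A quick sanity check of the formula: a 90° arc of a circle of radius 10 is a quarter of the circumference 2·π·10, i.e. 5π ≈ 15.71.

from math import isclose, pi

assert isclose(arc_length(90, 10), 2 * pi * 10 / 4)  # quarter circle
assert isclose(arc_length(360, 10), 2 * pi * 10)  # full circle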
104
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class A ( unittest.TestCase ):
    def _A (self ):
        __lowercase= logging.get_logger()

        # the current default level is logging.WARNING
        __lowercase= logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )

        # restore to the original level
        logging.set_verbosity(lowerCAmelCase )

    def _A (self ):
        __lowercase= logging.get_verbosity()
        __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' )
        __lowercase= 'Testing 1, 2, 3'

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(lowerCAmelCase ) as cl:
                logger.warning(lowerCAmelCase )
            self.assertEqual(cl.out , msg + '\n' )

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(lowerCAmelCase ) as cl:
            logger.warning(lowerCAmelCase )
        self.assertEqual(cl.out , '' )

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(lowerCAmelCase ) as cl:
            logger.warning(lowerCAmelCase )
        self.assertEqual(cl.out , msg + '\n' )

        # restore to the original level
        logging.set_verbosity(lowerCAmelCase )

    @mockenv(TRANSFORMERS_VERBOSITY='error' )
    def _A (self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' )

        __lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase )
        __lowercase= logging.log_levels[env_level_str]

        __lowercase= logging.get_verbosity()
        self.assertEqual(
            lowerCAmelCase ,
            lowerCAmelCase ,
            f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' ,
        )

        # restore to the original level
        __lowercase= ''
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY='super-error' )
    def _A (self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        __lowercase= logging.logging.getLogger()
        with CaptureLogger(lowerCAmelCase ) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart' )
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )

        # no need to restore as nothing was changed

    def _A (self ):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' )
        __lowercase= 'Testing 1, 2, 3'

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(lowerCAmelCase ) as cl:
                logger.warning_advice(lowerCAmelCase )
            self.assertEqual(cl.out , '' )

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(lowerCAmelCase ) as cl:
                logger.warning_advice(lowerCAmelCase )
            self.assertEqual(cl.out , msg + '\n' )


def _lowerCamelCase( ) -> Optional[int]:
    '''simple docstring'''
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
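Outside the test suite, the same knobs are the supported way to tune library verbosity; a minimal sketch of the public API exercised above:

from transformers.utils import logging

logging.set_verbosity_error()    # silence warnings from all `transformers.*` loggers
logging.set_verbosity_warning()  # back to the default WARNING level
logging.disable_progress_bar()   # progress bars are controlled separately
logging.enable_progress_bar()
# Alternatively, set TRANSFORMERS_VERBOSITY=error in the environment before import.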
295
0
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class SCREAMING_SNAKE_CASE__ ( A_ ):
    '''simple docstring'''

    __lowerCamelCase : List[Any] = (DPMSolverSDEScheduler,)
    __lowerCamelCase : Optional[Any] = 10

    def _lowerCAmelCase ( self, **lowerCamelCase__ ):
        A : Any = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }

        config.update(**lowerCamelCase__ )
        return config

    def _lowerCAmelCase ( self ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase__ )

    def _lowerCAmelCase ( self ):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=lowerCamelCase__, beta_end=lowerCamelCase__ )

    def _lowerCAmelCase ( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=lowerCamelCase__ )

    def _lowerCAmelCase ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCamelCase__ )

    def _lowerCAmelCase ( self ):
        A : int = self.scheduler_classes[0]
        A : List[Any] = self.get_scheduler_config()
        A : Optional[int] = scheduler_class(**lowerCamelCase__ )

        scheduler.set_timesteps(self.num_inference_steps )

        A : int = self.dummy_model()
        A : str = self.dummy_sample_deter * scheduler.init_noise_sigma
        A : int = sample.to(lowerCamelCase__ )

        for i, t in enumerate(scheduler.timesteps ):
            A : List[str] = scheduler.scale_model_input(lowerCamelCase__, lowerCamelCase__ )

            A : Any = model(lowerCamelCase__, lowerCamelCase__ )

            A : Dict = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
            A : Dict = output.prev_sample

        A : List[str] = torch.sum(torch.abs(lowerCamelCase__ ) )
        A : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3

    def _lowerCAmelCase ( self ):
        A : Optional[int] = self.scheduler_classes[0]
        A : List[str] = self.get_scheduler_config(prediction_type="""v_prediction""" )
        A : Optional[Any] = scheduler_class(**lowerCamelCase__ )

        scheduler.set_timesteps(self.num_inference_steps )

        A : List[str] = self.dummy_model()
        A : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        A : List[str] = sample.to(lowerCamelCase__ )

        for i, t in enumerate(scheduler.timesteps ):
            A : int = scheduler.scale_model_input(lowerCamelCase__, lowerCamelCase__ )

            A : Optional[int] = model(lowerCamelCase__, lowerCamelCase__ )

            A : Optional[int] = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
            A : List[str] = output.prev_sample

        A : str = torch.sum(torch.abs(lowerCamelCase__ ) )
        A : str = torch.mean(torch.abs(lowerCamelCase__ ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3

    def _lowerCAmelCase ( self ):
        A : Optional[Any] = self.scheduler_classes[0]
        A : Union[str, Any] = self.get_scheduler_config()
        A : List[Any] = scheduler_class(**lowerCamelCase__ )

        scheduler.set_timesteps(self.num_inference_steps, device=lowerCamelCase__ )

        A : List[str] = self.dummy_model()
        A : Optional[Any] = self.dummy_sample_deter.to(lowerCamelCase__ ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            A : int = scheduler.scale_model_input(lowerCamelCase__, lowerCamelCase__ )

            A : str = model(lowerCamelCase__, lowerCamelCase__ )

            A : List[Any] = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
            A : Union[str, Any] = output.prev_sample

        A : Any = torch.sum(torch.abs(lowerCamelCase__ ) )
        A : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3

    def _lowerCAmelCase ( self ):
        A : Optional[Any] = self.scheduler_classes[0]
        A : str = self.get_scheduler_config()
        A : Dict = scheduler_class(**lowerCamelCase__, use_karras_sigmas=lowerCamelCase__ )

        scheduler.set_timesteps(self.num_inference_steps, device=lowerCamelCase__ )

        A : Dict = self.dummy_model()
        A : Optional[int] = self.dummy_sample_deter.to(lowerCamelCase__ ) * scheduler.init_noise_sigma
        A : Optional[Any] = sample.to(lowerCamelCase__ )

        for t in scheduler.timesteps:
            A : Tuple = scheduler.scale_model_input(lowerCamelCase__, lowerCamelCase__ )

            A : List[Any] = model(lowerCamelCase__, lowerCamelCase__ )

            A : Optional[Any] = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
            A : Dict = output.prev_sample

        A : Dict = torch.sum(torch.abs(lowerCamelCase__ ) )
        A : Any = torch.mean(torch.abs(lowerCamelCase__ ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
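All of the integration tests above run the same denoising loop; distilled, it is the standard diffusers scheduler contract. A sketch, assuming the optional torchsde dependency is installed (the lambda is a placeholder for a trained denoiser, so the outputs are meaningless):

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)

model = lambda x, t: torch.zeros_like(x)  # placeholder denoiser

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample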
116
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


lowerCAmelCase = '''▁'''

lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}

lowerCAmelCase = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}

lowerCAmelCase = {
    '''google/pegasus-xsum''': 5_1_2,
}

lowerCAmelCase = logging.get_logger(__name__)


class A ( A_ ):
    UpperCamelCase_ : Union[str, Any] =VOCAB_FILES_NAMES
    UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES
    UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : int =['''input_ids''', '''attention_mask''']

    def __init__(self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=1_0_3 , lowerCAmelCase = None , **lowerCAmelCase , ):
        __lowercase= offset
        if additional_special_tokens is not None:
            if not isinstance(lowerCAmelCase , lowerCAmelCase ):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(lowerCAmelCase )}, but is'
                    f' {type(lowerCAmelCase )}' )

            __lowercase= (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(lowerCAmelCase ) , self.offset - 1 )
            ]

            if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            __lowercase= additional_special_tokens_extended
        else:
            __lowercase= [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]

        __lowercase= {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=lowerCAmelCase ,
            unk_token=lowerCAmelCase ,
            mask_token=lowerCAmelCase ,
            pad_token=lowerCAmelCase ,
            mask_token_sent=lowerCAmelCase ,
            offset=lowerCAmelCase ,
            additional_special_tokens=lowerCAmelCase ,
            sp_model_kwargs=self.sp_model_kwargs ,
            **lowerCAmelCase ,
        )
        __lowercase= mask_token_sent
        __lowercase= vocab_file
        __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCAmelCase )

        # add special tokens to encoder dict
        __lowercase= {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )

        __lowercase= {v: k for k, v in self.encoder.items()}

    @property
    def _A (self ):
        return len(self.sp_model ) + self.offset

    def _A (self ):
        __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self ):
        __lowercase= self.__dict__.copy()
        __lowercase= None
        return state

    def __setstate__(self , lowerCAmelCase ):
        __lowercase= d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            __lowercase= {}

        __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _A (self , lowerCAmelCase ):
        return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )

    def _A (self , lowerCAmelCase ):
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        __lowercase= self.sp_model.piece_to_id(lowerCAmelCase )
        return sp_id + self.offset

    def _A (self , lowerCAmelCase ):
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            __lowercase= self.sp_model.IdToPiece(index - self.offset )
        return token

    def _A (self , lowerCAmelCase ):
        __lowercase= []
        __lowercase= ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(lowerCAmelCase ) + token
                __lowercase= []
            else:
                current_sub_tokens.append(lowerCAmelCase )
        out_string += self.sp_model.decode(lowerCAmelCase )
        return out_string.strip()

    def _A (self , lowerCAmelCase=False ):
        return 1

    def _A (self , lowerCAmelCase ):
        __lowercase= set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ):
        if already_has_special_tokens:
            return self._special_token_mask(lowerCAmelCase )
        elif token_ids_a is None:
            return self._special_token_mask(lowerCAmelCase ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a ) + [1]

    def _A (self , lowerCAmelCase , lowerCAmelCase=None ):
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        __lowercase= os.path.join(
            lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCAmelCase , 'wb' ) as fi:
                __lowercase= self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase )

        return (out_vocab_file,)
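The offset bookkeeping is easiest to see with concrete ids: ids below `offset` are reserved for the pad/eos/mask/<unk_x> specials, and every raw SentencePiece id is shifted up by `offset`. A sketch, assuming network access to download the checkpoint:

from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
print(tok.pad_token_id, tok.eos_token_id)  # 0, 1 -- the reserved low ids
sp_id = tok.sp_model.piece_to_id("▁the")
assert tok.convert_tokens_to_ids("▁the") == sp_id + tok.offset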
295
0
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ : Tuple = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ : List[Any] = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ : Union[str, Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def UpperCamelCase ( self : List[Any] ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
                }
            ) ,
        )

    def UpperCamelCase ( self : Tuple , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Union[str, Any] = 1 , snake_case__ : Dict = 4 , ):
        """simple docstring"""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=snake_case__ , hypotheses=snake_case__ , min_len=snake_case__ , max_len=snake_case__ )
        }
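Since the metric is a thin wrapper over NLTK, the same numbers can be reproduced directly; a minimal sketch using nltk.translate.gleu_score:

from nltk.translate import gleu_score

hypothesis = "the cat sat on the mat".split()
reference = "the cat is on the mat".split()

# Sentence-level GLEU: min(n-gram recall, n-gram precision) over n = 1..4.
print(gleu_score.sentence_gleu([reference], hypothesis))

# Corpus-level GLEU, matching what _compute() above delegates to.
print(gleu_score.corpus_gleu([[reference]], [hypothesis], min_len=1, max_len=4))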
133
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class A :
    def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
        __lowercase= parent
        __lowercase= batch_size
        __lowercase= seq_length
        __lowercase= is_training
        __lowercase= use_token_type_ids
        __lowercase= use_labels
        __lowercase= vocab_size
        __lowercase= hidden_size
        __lowercase= num_hidden_layers
        __lowercase= num_attention_heads
        __lowercase= intermediate_size
        __lowercase= hidden_act
        __lowercase= hidden_dropout_prob
        __lowercase= attention_probs_dropout_prob
        __lowercase= max_position_embeddings
        __lowercase= type_vocab_size
        __lowercase= type_sequence_label_size
        __lowercase= initializer_range
        __lowercase= num_labels
        __lowercase= num_choices
        __lowercase= scope
        __lowercase= self.vocab_size - 1

    def _A (self ):
        __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __lowercase= None
        if self.use_token_type_ids:
            __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __lowercase= None
        __lowercase= None
        __lowercase= None
        if self.use_labels:
            __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowercase= ids_tensor([self.batch_size] , self.num_choices )

        __lowercase= OpenAIGPTConfig(
            vocab_size=self.vocab_size ,
            n_embd=self.hidden_size ,
            n_layer=self.num_hidden_layers ,
            n_head=self.num_attention_heads ,
            n_positions=self.max_position_embeddings ,
            pad_token_id=self.pad_token_id ,
        )
        __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
        __lowercase= OpenAIGPTModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()

        __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase )
        __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase )
        __lowercase= model(lowerCAmelCase )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
        __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()

        __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
        __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()

        __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
        __lowercase= self.num_labels
        __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()

        __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _A (self ):
        __lowercase= self.prepare_config_and_inputs()
        (
            (
                __lowercase
            ),
            (
                __lowercase
            ),
            (
                __lowercase
            ),
            (
                __lowercase
            ),
            (
                __lowercase
            ),
            (
                __lowercase
            ),
            (
                __lowercase
            ),
        )= config_and_inputs

        __lowercase= {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }

        return config, inputs_dict


@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
    UpperCamelCase_ : Optional[Any] =(
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    UpperCamelCase_ : Tuple =(
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    UpperCamelCase_ : List[str] =(
        {
            '''feature-extraction''': OpenAIGPTModel,
            '''text-classification''': OpenAIGPTForSequenceClassification,
            '''text-generation''': OpenAIGPTLMHeadModel,
            '''zero-shot''': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
        __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                __lowercase= torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,
                    dtype=torch.long ,
                    device=lowerCAmelCase ,
                )
                __lowercase= inputs_dict['labels']
                __lowercase= inputs_dict['labels']
                __lowercase= torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) ,
                    dtype=torch.long ,
                    device=lowerCAmelCase ,
                )
                __lowercase= torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )

        return inputs_dict

    def _A (self ):
        __lowercase= OpenAIGPTModelTester(self )
        __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 )

    def _A (self ):
        self.config_tester.run_common_tests()

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase )

    @slow
    def _A (self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )


@require_torch
class A ( unittest.TestCase ):
    @slow
    def _A (self ):
        __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
        model.to(lowerCAmelCase )
        __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase )  # the president is
        __lowercase= [
            4_8_1,
            4_7_3_5,
            5_4_4,
            2_4_6,
            9_6_3,
            8_7_0,
            7_6_2,
            2_3_9,
            2_4_4,
            4_0_4_7_7,
            2_4_4,
            2_4_9,
            7_1_9,
            8_8_1,
            4_8_7,
            5_4_4,
            2_4_0,
            2_4_4,
            6_0_3,
            4_8_1,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
        self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
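The slow integration test corresponds to this everyday usage; a minimal sketch (downloads the openai-gpt checkpoint on first run):

import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy, as in the test
print(tokenizer.decode(output_ids[0]))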
295
0
'''simple docstring'''


def permute(nums: list[int]) -> list[list[int]]:
    """simple docstring"""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """simple docstring"""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
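Both functions enumerate the same n! orderings, only in a different order, so sorting the outputs makes them comparable:

assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
assert len(permute2([1, 2, 3, 4])) == 24  # 4! permutations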
134
from math import isqrt


def is_prime(number: int) -> bool:
    '''simple docstring'''
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    '''simple docstring'''
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
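The candidate sequence 7, 19, 37, 61, ... enumerates 3k² + 3k + 1, the differences of consecutive cubes (k+1)³ − k³, which are the only values the gap between two cubes can take; stepping by 6·cube_index reproduces it. A quick check:

for k in range(1, 5):
    assert (k + 1) ** 3 - k ** 3 == 3 * k * k + 3 * k + 1
assert solution(100) == 4  # 7, 19, 37, 61 are prime; 91 = 7 * 13 is not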
295
0
"""simple docstring""" from collections.abc import Iterable from typing import Any class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : int ,lowercase_ : List[str] = None ): lowerCAmelCase__ : Tuple = value lowerCAmelCase__ : Any = None # Added in order to delete a node easier lowerCAmelCase__ : Union[str, Any] = None lowerCAmelCase__ : Optional[Any] = None def __repr__( self : List[Any] ): from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F'{self.value}': (self.left, self.right)} ,indent=1 ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : List[Any] ,lowercase_ : List[Any] = None ): lowerCAmelCase__ : str = root def __str__( self : List[str] ): return str(self.root ) def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Union[str, Any] ,lowercase_ : Union[str, Any] ): if new_children is not None: # reset its kids lowerCAmelCase__ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(lowercase_ ): # If it is the right children lowerCAmelCase__ : int = new_children else: lowerCAmelCase__ : Optional[int] = new_children else: lowerCAmelCase__ : List[Any] = new_children def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : List[str] ): if node.parent and node.parent.right: return node == node.parent.right return False def __lowerCAmelCase ( self : Optional[Any] ): return self.root is None def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : List[str] ): lowerCAmelCase__ : int = Node(lowercase_ ) # create a new Node if self.empty(): # if Tree is empty lowerCAmelCase__ : Dict = new_node # set its root else: # Tree is not empty lowerCAmelCase__ : Dict = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: lowerCAmelCase__ : Tuple = new_node # We insert the new node in a leaf break else: lowerCAmelCase__ : Optional[int] = parent_node.left else: if parent_node.right is None: lowerCAmelCase__ : int = new_node break else: lowerCAmelCase__ : List[str] = parent_node.right lowerCAmelCase__ : Any = parent_node def __lowerCAmelCase ( self : List[Any] ,*lowercase_ : str ): for value in values: self.__insert(lowercase_ ) def __lowerCAmelCase ( self : Dict ,lowercase_ : List[str] ): if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: lowerCAmelCase__ : Optional[int] = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: lowerCAmelCase__ : str = node.left if value < node.value else node.right return node def __lowerCAmelCase ( self : Dict ,lowercase_ : Any = None ): if node is None: if self.root is None: return None lowerCAmelCase__ : int = self.root if not self.empty(): while node.right is not None: lowerCAmelCase__ : int = node.right return node def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : str = None ): if node is None: lowerCAmelCase__ : Optional[Any] = self.root if self.root is None: return None if not self.empty(): lowerCAmelCase__ : Optional[Any] = self.root while node.left is not None: lowerCAmelCase__ : Optional[int] = node.left return node def __lowerCAmelCase ( self : Tuple ,lowercase_ : str ): lowerCAmelCase__ : Tuple = self.search(lowercase_ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(lowercase_ ,lowercase_ ) elif node.left is None: # Has only right children self.__reassign_nodes(lowercase_ ,node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(lowercase_ ,node.left ) else: lowerCAmelCase__ : Dict = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore lowerCAmelCase__ : Optional[Any] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def __lowerCAmelCase ( self : str ,lowercase_ : Union[str, Any] ): if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def __lowerCAmelCase ( self : int ,lowercase_ : Optional[int]=None ): if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Dict ,lowercase_ : Tuple ): if node: self.inorder(lowercase_ ,node.left ) arr.append(node.value ) self.inorder(lowercase_ ,node.right ) def __lowerCAmelCase ( self : str ,lowercase_ : List[str] ,lowercase_ : List[Any] ): lowerCAmelCase__ : Optional[int] = [] self.inorder(lowercase_ ,lowercase_ ) # append all values to list using inorder traversal return arr[k - 1] def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : int = [] if curr_node is not None: lowerCAmelCase__ : int = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7) lowerCAmelCase__ : Optional[Any] = BinarySearchTree() for i in testlist: t.insert(lowercase__ ) # Prints all the elements of the list in order traversal print(lowercase__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''' , t.get_max().value ) # type: ignore print('''Min Value: ''' , t.get_min().value ) # type: ignore for i in testlist: t.remove(lowercase__ ) print(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
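find_kth_smallest is defined above but never exercised by the demo; because the inorder walk visits values in ascending order, indexing into it yields the k-th smallest element:

t = BinarySearchTree()
t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
print(t.find_kth_smallest(1, t.root))  # 1, the minimum
print(t.find_kth_smallest(5, t.root))  # 7, the 5th smallest of {1, 3, 4, 6, 7, 8, 10, 13, 14}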
106
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
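For example, the returned list is the multiset of prime factors in non-decreasing order:

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]  # 2**3 * 3**2 * 5
assert prime_factors(97) == [97]  # a prime returns itself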
295
0