code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A: Optional[Any] = logging.get_logger(__name__) A: Tuple = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Optional[Any] = 'data2vec-vision' def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[3, 5, 7, 11] , _SCREAMING_SNAKE_CASE=[1, 2, 3, 6] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.4 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=255 , **_SCREAMING_SNAKE_CASE , ) -> int: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : int = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Any = initializer_range UpperCAmelCase : List[str] = layer_norm_eps UpperCAmelCase : Any = image_size UpperCAmelCase : Tuple = patch_size UpperCAmelCase : List[Any] = num_channels UpperCAmelCase : Optional[int] = use_mask_token 
UpperCAmelCase : Dict = use_absolute_position_embeddings UpperCAmelCase : Any = use_relative_position_bias UpperCAmelCase : Optional[Any] = use_shared_relative_position_bias UpperCAmelCase : Dict = layer_scale_init_value UpperCAmelCase : Any = drop_path_rate UpperCAmelCase : List[Any] = use_mean_pooling # decode head attributes (semantic segmentation) UpperCAmelCase : List[Any] = out_indices UpperCAmelCase : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) UpperCAmelCase : Dict = use_auxiliary_head UpperCAmelCase : List[str] = auxiliary_loss_weight UpperCAmelCase : List[Any] = auxiliary_channels UpperCAmelCase : List[str] = auxiliary_num_convs UpperCAmelCase : str = auxiliary_concat_input UpperCAmelCase : Optional[int] = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : List[str] = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def SCREAMING_SNAKE_CASE ( self ) -> float: '''simple docstring''' return 1E-4
109
import operator as op a__ = '''scaler.pt''' a__ = '''pytorch_model''' a__ = '''random_states''' a__ = '''optimizer''' a__ = '''scheduler''' a__ = '''pytorch_model.bin''' a__ = '''pytorch_model.bin.index.json''' a__ = '''model.safetensors''' a__ = '''model.safetensors.index.json''' a__ = '''1.10.2''' a__ = '''py38''' a__ = '''4.17.0''' a__ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge'''] a__ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2'''] a__ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP'''] a__ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH'''] a__ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT'''] a__ = '''2.0.1''' a__ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich'''] a__ = ['''default''', '''reduce-overhead''', '''max-autotune'''] a__ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 a__ = [ '''nnodes''', '''nproc_per_node''', '''rdzv_backend''', '''rdzv_endpoint''', '''rdzv_id''', '''rdzv_conf''', '''standalone''', '''max_restarts''', '''monitor_interval''', '''start_method''', '''role''', '''module''', '''m''', '''no_python''', '''run_path''', '''log_dir''', '''r''', '''redirects''', '''t''', '''tee''', '''node_rank''', '''master_addr''', '''master_port''', ] a__ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM'''] a__ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
235
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=7, A=3, A=10, A=18, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=None, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {'shortest_edge': 18} SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE : Optional[int] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : List[str] = num_channels SCREAMING_SNAKE_CASE : List[str] = num_frames SCREAMING_SNAKE_CASE : Optional[Any] = image_size SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : Optional[Any] = max_resolution SCREAMING_SNAKE_CASE : List[Any] = do_resize SCREAMING_SNAKE_CASE : Optional[int] = size SCREAMING_SNAKE_CASE : Any = do_normalize SCREAMING_SNAKE_CASE : List[Any] = image_mean SCREAMING_SNAKE_CASE : List[str] = image_std SCREAMING_SNAKE_CASE : Dict = crop_size def UpperCamelCase_ ( self ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Any = VivitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = VivitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): 
'''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A, 'image_mean' ) ) self.assertTrue(hasattr(A, 'image_std' ) ) self.assertTrue(hasattr(A, 'do_normalize' ) ) self.assertTrue(hasattr(A, 'do_resize' ) ) self.assertTrue(hasattr(A, 'do_center_crop' ) ) self.assertTrue(hasattr(A, 'size' ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18} ) SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 ) self.assertEqual(image_processor.size, {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84} ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos SCREAMING_SNAKE_CASE : List[str] = prepare_video_inputs(self.image_processor_tester, equal_resolution=A ) for video in video_inputs: self.assertIsInstance(A, A ) self.assertIsInstance(video[0], Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Dict = image_processing(video_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched SCREAMING_SNAKE_CASE : List[Any] = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : Tuple = prepare_video_inputs(self.image_processor_tester, equal_resolution=A, numpify=A ) for video in video_inputs: self.assertIsInstance(A, A ) self.assertIsInstance(video[0], np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Dict = image_processing(video_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched SCREAMING_SNAKE_CASE : Any = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Tuple = prepare_video_inputs(self.image_processor_tester, equal_resolution=A, torchify=A ) for video in video_inputs: self.assertIsInstance(A, A ) self.assertIsInstance(video[0], torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = image_processing(video_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], 
self.image_processor_tester.crop_size['width'], ), ) # Test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), )
246
'''simple docstring''' import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self, A, A=2, A=56, A=True, A=True, A=True, A=True, A=99, A=32, A=2, A=2, A=7, A="gelu_new", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=4, A="block_sparse", A=True, A=False, A=2, A=3, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : List[str] = seq_length SCREAMING_SNAKE_CASE : Optional[Any] = is_training SCREAMING_SNAKE_CASE : Dict = use_attention_mask SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE : int = hidden_size SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_attention_heads SCREAMING_SNAKE_CASE : Dict = intermediate_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_act SCREAMING_SNAKE_CASE : str = hidden_dropout_prob SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Any = max_position_embeddings SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size SCREAMING_SNAKE_CASE : Any = type_sequence_label_size SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : Optional[Any] = num_choices SCREAMING_SNAKE_CASE : int = rescale_embeddings SCREAMING_SNAKE_CASE : Any = attention_type SCREAMING_SNAKE_CASE : str = use_bias 
SCREAMING_SNAKE_CASE : Tuple = block_size SCREAMING_SNAKE_CASE : List[Any] = num_random_blocks def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_attention_mask: SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) SCREAMING_SNAKE_CASE : List[str] = BigBirdConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=A, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, ) return config, input_ids, token_type_ids, attention_mask def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : List[str] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask, } return config, inputs_dict @require_flax class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : str = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, 
FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) A : List[Any] = False A : List[str] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCamelCase_ ( self ): '''simple docstring''' super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCamelCase_ ( self ): '''simple docstring''' super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCamelCase_ ( self ): '''simple docstring''' super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCamelCase_ ( self ): '''simple docstring''' super().test_hidden_states_output() @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained('google/bigbird-roberta-base' ) self.assertIsNotNone(A ) def UpperCamelCase_ ( self ): '''simple docstring''' if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(A, A ) SCREAMING_SNAKE_CASE : List[str] = model_class(A ) @jax.jit def model_jitted(A, A=None, **A ): return model(input_ids=A, attention_mask=A, **A ) with self.subTest('JIT 
Enabled' ): SCREAMING_SNAKE_CASE : List[str] = model_jitted(**A ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): SCREAMING_SNAKE_CASE : Optional[Any] = model_jitted(**A ).to_tuple() self.assertEqual(len(A ), len(A ) ) for jitted_output, output in zip(A, A ): self.assertEqual(jitted_output.shape, output.shape ) def UpperCamelCase_ ( self, A, A, A, A=1E-5, A="outputs", A=None ): '''simple docstring''' if name.startswith('outputs.attentions' ): return else: super().check_pt_flax_outputs(A, A, A, A, A, A )
246
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = {"""vocab_file""": """spiece.model"""} _A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } _A = {"""bert_for_seq_generation""": 5_12} class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] def __init__(self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<::::>" , _lowerCamelCase = None , **_lowerCamelCase , ): """simple docstring""" UpperCAmelCase__ : str = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , sep_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) UpperCAmelCase__ : Any = vocab_file UpperCAmelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCamelCase ) @property def _a (self ): """simple docstring""" return self.sp_model.get_piece_size() def _a (self ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): """simple docstring""" UpperCAmelCase__ : Dict = self.__dict__.copy() UpperCAmelCase__ : Any = None return state def 
__setstate__(self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase__ : Dict = {} UpperCAmelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a (self , _lowerCamelCase ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def _a (self , _lowerCamelCase ): """simple docstring""" return self.sp_model.piece_to_id(_lowerCamelCase ) def _a (self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Dict = self.sp_model.IdToPiece(_lowerCamelCase ) return token def _a (self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Union[str, Any] = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCamelCase ) + token UpperCAmelCase__ : Union[str, Any] = [] else: current_sub_tokens.append(_lowerCamelCase ) out_string += self.sp_model.decode(_lowerCamelCase ) return out_string.strip() def _a (self , _lowerCamelCase , _lowerCamelCase = None ): """simple docstring""" if not os.path.isdir(_lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase__ : Optional[Any] = os.path.join( _lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase , """wb""" ) as fi: UpperCAmelCase__ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,)
171
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _A = { """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), """bert""": (BertConfig, BertForMaskedLM, BertTokenizer), """gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def a__ ( lowerCAmelCase ) -> Optional[int]: assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[int]: if args.student_type == "roberta": UpperCAmelCase__ : Optional[Any] = False elif args.student_type == "gpt2": 
UpperCAmelCase__ : Optional[int] = False def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Any: if args.student_type == "roberta": UpperCAmelCase__ : Tuple = False def a__ ( ) -> int: UpperCAmelCase__ : Dict = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=lowerCAmelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=lowerCAmelCase , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=lowerCAmelCase , type=lowerCAmelCase , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=lowerCAmelCase , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=lowerCAmelCase , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=lowerCAmelCase , help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=lowerCAmelCase , help="""Linear weight for the MLM loss. Must be >=0. 
Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=lowerCAmelCase , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=lowerCAmelCase , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=lowerCAmelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.15 , type=lowerCAmelCase , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=lowerCAmelCase , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=lowerCAmelCase , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=lowerCAmelCase , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=lowerCAmelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=lowerCAmelCase , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. 
For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=lowerCAmelCase , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=lowerCAmelCase , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=lowerCAmelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.05 , type=lowerCAmelCase , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=lowerCAmelCase , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5E-4 , type=lowerCAmelCase , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=lowerCAmelCase , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=lowerCAmelCase , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.02 , type=lowerCAmelCase , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=lowerCAmelCase , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=lowerCAmelCase , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=lowerCAmelCase , default=-1 , help="""Distributed training - Local rank""" ) 
parser.add_argument("""--seed""" , type=lowerCAmelCase , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=lowerCAmelCase , default=5_00 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=lowerCAmelCase , default=40_00 , help="""Checkpoint interval.""" ) UpperCAmelCase__ : List[Any] = parser.parse_args() sanity_checks(lowerCAmelCase ) # ARGS # init_gpu_params(lowerCAmelCase ) set_seed(lowerCAmelCase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(F"""Param: {args}""" ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(lowerCAmelCase ) , lowerCAmelCase , indent=4 ) git_log(args.dump_path ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = MODEL_CLASSES[args.student_type] UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCAmelCase__ : List[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCAmelCase__ : List[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCAmelCase__ : List[Any] = tokenizer.all_special_tokens.index(lowerCAmelCase ) UpperCAmelCase__ : Tuple = tokenizer.all_special_ids[idx] logger.info(F"""Special tokens {special_tok_ids}""" ) UpperCAmelCase__ : Any = special_tok_ids UpperCAmelCase__ : Any = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F"""Loading data from {args.data_file}""" ) with open(args.data_file , """rb""" ) as fp: UpperCAmelCase__ : 
List[str] = pickle.load(lowerCAmelCase ) if args.mlm: logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts , """rb""" ) as fp: UpperCAmelCase__ : List[Any] = pickle.load(lowerCAmelCase ) UpperCAmelCase__ : List[Any] = np.maximum(lowerCAmelCase , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCAmelCase__ : int = 0.0 # do not predict special tokens UpperCAmelCase__ : str = torch.from_numpy(lowerCAmelCase ) else: UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : str = LmSeqsDataset(params=lowerCAmelCase , data=lowerCAmelCase ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(F"""Loading student config from {args.student_config}""" ) UpperCAmelCase__ : List[str] = student_config_class.from_pretrained(args.student_config ) UpperCAmelCase__ : List[Any] = True if args.student_pretrained_weights is not None: logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" ) UpperCAmelCase__ : List[str] = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCAmelCase ) else: UpperCAmelCase__ : List[Any] = student_model_class(lowerCAmelCase ) if args.n_gpu > 0: student.to(F"""cuda:{args.local_rank}""" ) logger.info("""Student loaded.""" ) # TEACHER # UpperCAmelCase__ : str = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCAmelCase ) if args.n_gpu > 0: teacher.to(F"""cuda:{args.local_rank}""" ) logger.info(F"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(lowerCAmelCase , lowerCAmelCase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(lowerCAmelCase , lowerCAmelCase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: 
assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCAmelCase__ : Optional[int] = Distiller( params=lowerCAmelCase , dataset=lowerCAmelCase , token_probs=lowerCAmelCase , student=lowerCAmelCase , teacher=lowerCAmelCase ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
171
1
from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging

# Module-level logger for this feature extractor.
UpperCamelCase__ = logging.get_logger(__name__)


# NOTE(review): this block is machine-obfuscated MCTCT-style feature-extractor code.
# The obfuscation collided names: three different methods below share the name
# `_lowerCamelCase`, every parameter is `__lowerCAmelCase`, and most assignment
# targets (originally `self.<attr>` or a local) were replaced by the throwaway
# name `UpperCamelCase__`. Bodies still reference the ORIGINAL names
# (`feature_size`, `raw_speech`, `msfc_features`, ...), so the code as written
# raises NameError — TODO confirm against the upstream source before running.
class __SCREAMING_SNAKE_CASE ( _a ):
    """Speech feature extractor producing log-mel filter-bank features.

    Emits ``input_features`` (frames x feature_size log-mel values) and an
    ``attention_mask``; padding/normalization is delegated to the
    ``SequenceFeatureExtractor`` base (presumably ``_a`` — TODO confirm).
    """

    # Names of the model-input arrays this extractor produces.
    snake_case : Any = ["""input_features""", """attention_mask"""]

    def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16000 , __lowerCAmelCase=0.0 , __lowerCAmelCase=10 , __lowerCAmelCase=25 , __lowerCAmelCase="hamming_window" , __lowerCAmelCase=3_2768.0 , __lowerCAmelCase=0.97 , __lowerCAmelCase=1.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , **__lowerCAmelCase , ):
        # Defaults (by position): feature_size=80, sampling_rate=16000,
        # padding_value=0.0, hop_length=10 ms, win_length=25 ms,
        # win_function="hamming_window", frame_signal_scale=32768.0,
        # preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True,
        # normalize_vars=True, return_attention_mask=False.
        super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase )
        # NOTE(review): the following targets were originally `self.<attr>`
        # assignments mirroring the constructor arguments.
        UpperCamelCase__ = feature_size
        UpperCamelCase__ = sampling_rate
        UpperCamelCase__ = padding_value
        UpperCamelCase__ = hop_length
        UpperCamelCase__ = win_length
        UpperCamelCase__ = frame_signal_scale
        UpperCamelCase__ = preemphasis_coeff
        UpperCamelCase__ = mel_floor
        UpperCamelCase__ = normalize_means
        UpperCamelCase__ = normalize_vars
        UpperCamelCase__ = win_function
        UpperCamelCase__ = return_attention_mask
        # Window/stride lengths converted from milliseconds to samples.
        UpperCamelCase__ = win_length * sampling_rate // 1000
        UpperCamelCase__ = hop_length * sampling_rate // 1000
        # FFT length is the next power of two >= window size; positive bins = n_fft//2 + 1.
        UpperCamelCase__ = optimal_fft_length(self.sample_size )
        UpperCamelCase__ = (self.n_fft // 2) + 1

    def _lowerCamelCase ( self , __lowerCAmelCase ):
        # Originally `_extract_mfsc_features(one_waveform)`: compute the log-mel
        # spectrogram of one waveform; returns an (n_frames, feature_size) array.
        if self.win_function == "hamming_window":
            # `periodic=` receives the (mangled) argument — presumably False for
            # a symmetric Hamming window; TODO confirm.
            UpperCamelCase__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=__lowerCAmelCase )
        else:
            UpperCamelCase__ = window_function(window_length=self.sample_size , name=self.win_function )
        UpperCamelCase__ = mel_filter_bank(
            num_frequency_bins=self.n_freqs ,
            num_mel_filters=self.feature_size ,
            min_frequency=0.0 ,
            max_frequency=self.sampling_rate / 2.0 ,
            sampling_rate=self.sampling_rate , )
        UpperCamelCase__ = spectrogram(
            one_waveform * self.frame_signal_scale ,
            window=__lowerCAmelCase ,
            frame_length=self.sample_size ,
            hop_length=self.sample_stride ,
            fft_length=self.n_fft ,
            center=__lowerCAmelCase ,
            preemphasis=self.preemphasis_coeff ,
            mel_filters=__lowerCAmelCase ,
            mel_floor=self.mel_floor ,
            log_mel="""log""" , )
        # Transpose so frames are the leading axis.
        return msfc_features.T

    def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        # Originally `_normalize_one(x, input_length, padding_value)`:
        # per-utterance mean/variance normalization over the non-padded prefix.
        # make sure we normalize float32 arrays
        if self.normalize_means:
            UpperCamelCase__ = x[:input_length].mean(axis=0 )
            UpperCamelCase__ = np.subtract(__lowerCAmelCase , __lowerCAmelCase )
        if self.normalize_vars:
            UpperCamelCase__ = x[:input_length].std(axis=0 )
            UpperCamelCase__ = np.divide(__lowerCAmelCase , __lowerCAmelCase )
        if input_length < x.shape[0]:
            # Re-fill the padded tail with the padding value after normalization.
            UpperCamelCase__ = padding_value
        # make sure array is in float32  (`np.floataa` is mangled np.float32)
        UpperCamelCase__ = x.astype(np.floataa )
        return x

    def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
        # Originally `normalize(input_features, attention_mask=None)`: derive the
        # valid length of each utterance from the attention mask (or the full
        # length when no mask) and normalize each one.
        UpperCamelCase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(__lowerCAmelCase , __lowerCAmelCase , self.padding_value ) for x, n in zip(__lowerCAmelCase , __lowerCAmelCase )]

    def __call__( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        # Originally `(raw_speech, padding=False, max_length=None, truncation=False,
        # pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None,
        # sampling_rate=None, **kwargs)`; featurize -> pad -> (optionally) normalize.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        # Batched numpy input is a 2-D array; anything deeper is multi-channel.
        UpperCamelCase__ = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        UpperCamelCase__ = is_batched_numpy or (
            isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) )
        # Coerce every waveform to float32 numpy arrays.
        if is_batched:
            UpperCamelCase__ = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
            UpperCamelCase__ = np.asarray(__lowerCAmelCase , dtype=np.floataa )
        elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            UpperCamelCase__ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCamelCase__ = [raw_speech]
        # extract fbank features
        UpperCamelCase__ = [self._extract_mfsc_features(__lowerCAmelCase ) for one_waveform in raw_speech]
        # convert into correct format for padding
        UpperCamelCase__ = BatchFeature({"""input_features""": features} )
        UpperCamelCase__ = self.pad(
            __lowerCAmelCase ,
            padding=__lowerCAmelCase ,
            max_length=__lowerCAmelCase ,
            truncation=__lowerCAmelCase ,
            pad_to_multiple_of=__lowerCAmelCase ,
            return_attention_mask=__lowerCAmelCase ,
            **__lowerCAmelCase , )
        # make sure list is in array format
        UpperCamelCase__ = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0] , __lowerCAmelCase ):
            UpperCamelCase__ = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features]
        UpperCamelCase__ = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            # (`np.intaa` is mangled np.int32)
            UpperCamelCase__ = [np.asarray(__lowerCAmelCase , dtype=np.intaa ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            # Only hand a mask to `normalize` when padding actually happened.
            UpperCamelCase__ = (
                np.array(__lowerCAmelCase , dtype=np.intaa )
                if self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None )
            UpperCamelCase__ = self.normalize(
                padded_inputs["""input_features"""] , attention_mask=__lowerCAmelCase )
        if return_tensors is not None:
            UpperCamelCase__ = padded_inputs.convert_to_tensors(__lowerCAmelCase )
        return padded_inputs
87
import json
import os

import torch

from diffusers import UNetaDModel  # NOTE(review): mangled `UNet1DModel`

# Output layout for the converted checkpoints.
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


# NOTE(review): machine-obfuscated one-off conversion script. Both functions were
# mangled to `_UpperCamelCase` and locals to `UpperCamelCase__`, while the bodies
# and the __main__ guard still use the ORIGINAL names (`hor`, `model`,
# `state_dict`, `hf_value_function`, `unet`, `value_function`) — the file cannot
# run as written; TODO restore names against the upstream diffusers script.
def _UpperCamelCase (a__ :int ):
    """Convert a temporal-UNet checkpoint (horizon `hor` in {32, 128}) to diffusers format."""
    if hor == 128:
        UpperCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        UpperCamelCase__ = (32, 128, 256)
        UpperCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
    elif hor == 32:
        UpperCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        UpperCamelCase__ = (32, 64, 128, 256)
        UpperCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
    # Hard-coded author-local path to the source checkpoint.
    UpperCamelCase__ = torch.load(f"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
    UpperCamelCase__ = model.state_dict()
    # diffusers UNet1DModel configuration mirroring the original architecture.
    UpperCamelCase__ = {
        """down_block_types""": down_block_types,
        """block_out_channels""": block_out_channels,
        """up_block_types""": up_block_types,
        """layers_per_block""": 1,
        """use_timestep_embedding""": True,
        """out_block_type""": """OutConv1DBlock""",
        """norm_num_groups""": 8,
        """downsample_each_block""": False,
        """in_channels""": 14,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """sample_size""": 6_5536,
        """mid_block_type""": """MidResTemporalBlock1D""",
        """act_fn""": """mish""",
    }
    UpperCamelCase__ = UNetaDModel(**a__ )
    print(f"""length of state dict: {len(state_dict.keys() )}""" )
    print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
    # Map old parameter names to new ones purely by iteration order — assumes
    # both state dicts enumerate parameters in the same order; TODO confirm.
    UpperCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        UpperCamelCase__ = state_dict.pop(a__ )
    hf_value_function.load_state_dict(a__ )
    # Persist weights and the matching config next to them.
    torch.save(hf_value_function.state_dict() , f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
    with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , """w""" ) as f:
        json.dump(a__ , a__ )


def _UpperCamelCase ():
    """Convert the horizon-32 value-function checkpoint to diffusers format."""
    UpperCamelCase__ = {
        """in_channels""": 14,
        """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
        """up_block_types""": (),
        """out_block_type""": """ValueFunction""",
        """mid_block_type""": """ValueFunctionMidBlock1D""",
        """block_out_channels""": (32, 64, 128, 256),
        """layers_per_block""": 1,
        """downsample_each_block""": True,
        """sample_size""": 6_5536,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """use_timestep_embedding""": True,
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """norm_num_groups""": 8,
        """act_fn""": """mish""",
    }
    UpperCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
    # Here the loaded object IS the state dict (no `.state_dict()` call).
    UpperCamelCase__ = model
    UpperCamelCase__ = UNetaDModel(**a__ )
    print(f"""length of state dict: {len(state_dict.keys() )}""" )
    print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
    # Same order-based key mapping as above.
    UpperCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        UpperCamelCase__ = state_dict.pop(a__ )
    hf_value_function.load_state_dict(a__ )
    torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
    with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
        json.dump(a__ , a__ )


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
87
1
'''simple docstring''' import math def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ): return math.pow(lowercase__ , 2 ) - a def __lowerCAmelCase (__lowerCAmelCase ): return 2 * x def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : str = 2.0 while start <= a: _UpperCAmelCase : Dict = math.pow(lowercase__ , 2 ) return start def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 9_999 , __lowerCAmelCase = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ): if a < 0: raise ValueError("math domain error" ) _UpperCAmelCase : List[Any] = get_initial_point(lowercase__ ) for _ in range(lowercase__ ): _UpperCAmelCase : List[Any] = value _UpperCAmelCase : Any = value - fx(lowercase__ , lowercase__ ) / fx_derivative(lowercase__ ) if abs(prev_value - value ) < tolerance: return value return value if __name__ == "__main__": from doctest import testmod testmod()
234
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


# NOTE(review): machine-obfuscated LUKE token-classification collator. The two
# helpers below collided onto the name `_lowercase`, and most assignment targets
# lost their original form (e.g. `out_tensor[i, : len(tensor)] = ...` became a
# bare `__lowerCAmelCase : ... = tensor[:sequence_length]`), so the left/right
# padding writes no longer reach `out_tensor` — the code cannot work as written.
# TODO restore against the upstream LUKE example before use.
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    # Originally `padding_tensor(sequences, padding_value, padding_side,
    # sequence_length)`: pad/truncate each sequence to `sequence_length`
    # (3-D when the padding value is a pair, e.g. entity spans) and return a list.
    if isinstance(lowercase__ , lowercase__ ):
        # Pair-valued padding => one extra trailing axis of size 2.
        __lowerCAmelCase : Dict = np.full((len(lowercase__ ), sequence_length, 2) , lowercase__ )
    else:
        __lowerCAmelCase : Optional[int] = np.full((len(lowercase__ ), sequence_length) , lowercase__ )
    for i, tensor in enumerate(lowercase__ ):
        if padding_side == "right":
            # NOTE(review): mangled — originally wrote into out_tensor[i, :len(tensor)].
            if isinstance(lowercase__ , lowercase__ ):
                __lowerCAmelCase : Union[str, Any] = tensor[:sequence_length]
            else:
                __lowerCAmelCase : int = tensor[:sequence_length]
        else:
            # NOTE(review): mangled — originally wrote into out_tensor[i, -len(tensor):].
            if isinstance(lowercase__ , lowercase__ ):
                __lowerCAmelCase : Union[str, Any] = tensor[:sequence_length]
            else:
                __lowerCAmelCase : Optional[Any] = tensor[:sequence_length]
    return out_tensor.tolist()


def _lowercase ( lowercase__ ):
    # Originally `is_punctuation(char)`: ASCII punctuation ranges first
    # (33-47, 58-64, 91-96, 123-126), then the Unicode "P*" categories.
    __lowerCAmelCase : Union[str, Any] = ord(lowercase__ )
    if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
        return True
    __lowerCAmelCase : int = unicodedata.category(lowercase__ )
    if cat.startswith('''P''' ):
        return True
    return False


@dataclass
class __lowercase (_UpperCAmelCase ):
    """Data collator for LUKE token classification.

    Pads token fields via the tokenizer, then pads ``labels``, ``ner_tags`` and
    ``original_entity_spans`` to the (padded) ``entity_ids`` length by hand.
    NOTE(review): field names/types were mangled (`_UpperCamelCase = 42` was
    originally `tokenizer: PreTrainedTokenizerBase`, then `padding`,
    `max_length`, `pad_to_multiple_of`, `label_pad_token_id = -100`,
    `return_tensors = "pt"`) — TODO confirm upstream.
    """

    _UpperCamelCase = 42
    _UpperCamelCase = True
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = -100
    _UpperCamelCase = "pt"

    def UpperCamelCase__ ( self , A_ ) ->Optional[int]:
        """Collate a list of feature dicts into one padded batch (torch tensors)."""
        import torch
        # Accept either 'label' or 'labels' as the label key.
        __lowerCAmelCase : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
        __lowerCAmelCase : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        # Tokenizer handles token-level padding; defer tensor conversion while
        # label columns still need manual padding.
        __lowerCAmelCase : List[Any] = self.tokenizer.pad(
            A_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        # Pad labels to the padded entity-sequence length, honoring padding side.
        __lowerCAmelCase : Dict = torch.tensor(batch['''entity_ids'''] ).shape[1]
        __lowerCAmelCase : Optional[int] = self.tokenizer.padding_side
        if padding_side == "right":
            __lowerCAmelCase : Any = [
                list(A_ ) + [self.label_pad_token_id] * (sequence_length - len(A_ )) for label in labels ]
        else:
            __lowerCAmelCase : Optional[int] = [
                [self.label_pad_token_id] * (sequence_length - len(A_ )) + list(A_ ) for label in labels ]
        # NER tags pad with -1; entity spans pad with the (-1, -1) pair.
        __lowerCAmelCase : Tuple = [feature['''ner_tags'''] for feature in features]
        __lowerCAmelCase : List[Any] = padding_tensor(A_ , -1 , A_ , A_ )
        __lowerCAmelCase : Optional[int] = [feature['''original_entity_spans'''] for feature in features]
        __lowerCAmelCase : Any = padding_tensor(A_ , (-1, -1) , A_ , A_ )
        # Convert everything to int64 tensors (`torch.intaa` is mangled torch.int64 — TODO confirm).
        __lowerCAmelCase : Optional[Any] = {k: torch.tensor(A_ , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
275
0
'''simple docstring''' from __future__ import annotations def __lowerCamelCase ( lowerCAmelCase_ ) -> bool: _a : Tuple = len(lowerCAmelCase_ ) # We need to create solution object to save path. _a : str = [[0 for _ in range(lowerCAmelCase_ )] for _ in range(lowerCAmelCase_ )] _a : str = run_maze(lowerCAmelCase_ , 0 , 0 , lowerCAmelCase_ ) if solved: print('\n'.join(str(lowerCAmelCase_ ) for row in solutions ) ) else: print('No solution exists!' ) return solved def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> bool: _a : Union[str, Any] = len(lowerCAmelCase_ ) # Final check point. if i == j == (size - 1): _a : Optional[int] = 1 return True _a : List[str] = (not i < 0) and (not j < 0) # Check lower bounds _a : int = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. _a : List[Any] = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited _a : List[str] = 1 # check for directions if ( run_maze(lowerCAmelCase_ , i + 1 , lowerCAmelCase_ , lowerCAmelCase_ ) or run_maze(lowerCAmelCase_ , lowerCAmelCase_ , j + 1 , lowerCAmelCase_ ) or run_maze(lowerCAmelCase_ , i - 1 , lowerCAmelCase_ , lowerCAmelCase_ ) or run_maze(lowerCAmelCase_ , lowerCAmelCase_ , j - 1 , lowerCAmelCase_ ) ): return True _a : int = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
107
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ ) -> list: if any(not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or x < 0 for x in sequence ): raise TypeError('Sequence must be list of non-negative integers' ) for _ in range(len(lowerCAmelCase_ ) ): for i, (rod_upper, rod_lower) in enumerate(zip(lowerCAmelCase_ , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
107
1
"""Accelerate + DeepSpeed example: fine-tune BERT on GLUE/MRPC with epoch
checkpointing and checkpoint-resume sanity checks.

NOTE(review): machine-obfuscated — all four functions were renamed to the same
identifier `a_` and locals to `lowercase__`, while the bodies still call the
ORIGINAL names (`get_dataloaders`, `evaluation_loop`, `training_function`,
`main`, `tokenizer`, `datasets`, ...). The file cannot run as written; TODO
restore names against the upstream accelerate example.
"""
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

# Originally MAX_GPU_BATCH_SIZE = 16 and EVAL_BATCH_SIZE = 32.
_UpperCamelCase : Any = 16
_UpperCamelCase : Tuple = 32


def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" ):
    """Originally `get_dataloaders(accelerator, batch_size, model_name)`:
    tokenize GLUE/MRPC and build train/eval DataLoaders."""
    lowercase__ : int = AutoTokenizer.from_pretrained(_lowerCAmelCase )
    lowercase__ : Dict = load_dataset('glue' , 'mrpc' )

    def tokenize_function(_lowerCAmelCase : Optional[Any] ):
        # max_length=None => use the model max length (it's actually the default)
        lowercase__ : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    lowercase__ : List[str] = datasets.map(
        _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    lowercase__ : str = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(_lowerCAmelCase : Optional[Any] ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    lowercase__ : List[Any] = DataLoader(
        tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
    lowercase__ : Optional[int] = DataLoader(
        tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
    return train_dataloader, eval_dataloader


def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
    """Originally `evaluation_loop(accelerator, model, eval_dataloader, metric)`:
    run eval, de-duplicate the distributed tail batch, return accuracy."""
    model.eval()
    lowercase__ : List[str] = 0
    for step, batch in enumerate(_lowerCAmelCase ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            lowercase__ : int = model(**_lowerCAmelCase )
        lowercase__ : List[Any] = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        lowercase__ , lowercase__ : Dict = accelerator.gather(
            (predictions, batch['labels']) )
        # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(_lowerCAmelCase ) - 1:
                lowercase__ : str = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                lowercase__ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
    lowercase__ : Any = metric.compute()
    return eval_metric["accuracy"]


def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int ):
    """Originally `training_function(config, args)`: build everything, optionally
    resume from a checkpoint (verifying saved state), then train and checkpoint
    per epoch."""
    lowercase__ : Tuple = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lowercase__ : str = config['lr']
    lowercase__ : Any = int(config['num_epochs'] )
    lowercase__ : Optional[Any] = int(config['seed'] )
    lowercase__ : str = int(config['batch_size'] )
    lowercase__ : Optional[int] = args.model_name_or_path
    set_seed(_lowerCAmelCase )
    lowercase__ , lowercase__ : List[str] = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    lowercase__ : Dict = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
    # Instantiate optimizer: real AdamW unless DeepSpeed supplies its own.
    lowercase__ : Dict = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim )
    lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
    if accelerator.state.deepspeed_plugin is not None:
        lowercase__ : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps' ]
    else:
        lowercase__ : List[str] = 1
    lowercase__ : int = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler: real schedule unless DeepSpeed config provides one.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lowercase__ : List[Any] = get_linear_schedule_with_warmup(
            optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
    else:
        lowercase__ : Union[str, Any] = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    # We need to keep track of how many total steps we have iterated over
    lowercase__ : int = 0
    # We also need to keep track of the stating epoch so files are named properly
    lowercase__ : Union[str, Any] = 0
    lowercase__ : str = evaluate.load('glue' , 'mrpc' )
    lowercase__ : Any = num_epochs
    if args.partial_train_epoch is not None:
        lowercase__ : int = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        # Derive the epoch number from the checkpoint folder name "epoch_<N>".
        lowercase__ : List[Any] = args.resume_from_checkpoint.split('epoch_' )[1]
        lowercase__ : List[str] = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        lowercase__ : str = int(_lowerCAmelCase ) + 1
        lowercase__ : str = evaluation_loop(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        accelerator.print('resumed checkpoint performance:' , _lowerCAmelCase )
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
        # Cross-check the restored run against the JSON state saved at that epoch.
        with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , 'r' ) as f:
            lowercase__ : Optional[Any] = json.load(_lowerCAmelCase )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    lowercase__ : Optional[Any] = {}
    for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
        model.train()
        for step, batch in enumerate(_lowerCAmelCase ):
            lowercase__ : Optional[int] = model(**_lowerCAmelCase )
            lowercase__ : Tuple = outputs.loss
            lowercase__ : Any = loss / gradient_accumulation_steps
            accelerator.backward(_lowerCAmelCase )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        # Save a checkpoint folder and the JSON state used by the resume check.
        lowercase__ : Optional[Any] = f"""epoch_{epoch}"""
        lowercase__ : Optional[int] = os.path.join(args.output_dir , _lowerCAmelCase )
        accelerator.save_state(_lowerCAmelCase )
        lowercase__ : Union[str, Any] = evaluation_loop(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        lowercase__ : int = accuracy
        lowercase__ : int = lr_scheduler.get_lr()[0]
        lowercase__ : Dict = optimizer.param_groups[0]['lr']
        lowercase__ : str = epoch
        lowercase__ : int = overall_step
        accelerator.print(f"""epoch {epoch}:""" , _lowerCAmelCase )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , 'w' ) as f:
                json.dump(_lowerCAmelCase , _lowerCAmelCase )


def a_ ( ):
    """Originally `main()`: parse CLI arguments and launch training."""
    lowercase__ : List[Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
    parser.add_argument(
        '--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--resume_from_checkpoint' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='If the training should continue from a checkpoint folder.' , )
    parser.add_argument(
        '--partial_train_epoch' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='If passed, the training will stop after this number of epochs.' , )
    parser.add_argument(
        '--num_epochs' , type=_lowerCAmelCase , default=2 , help='Number of train epochs.' , )
    lowercase__ : Optional[int] = parser.parse_args()
    lowercase__ : List[str] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(_lowerCAmelCase , _lowerCAmelCase )


if __name__ == "__main__":
    main()
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
0
"""Mandelbrot set renderer.

Fix: the obfuscation pass renamed all four functions to the same colliding
identifier `_UpperCamelCase` while `get_image` and the `__main__` block call
the canonical names (NameError as written). Canonical names are restored, and
the PIL import is moved inside `get_image` so the pure math helpers are usable
without Pillow installed.
"""
import colorsys


def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape time of c = x + iy in [0, 1].

    1.0 means the orbit stayed bounded for all `max_step` iterations
    (the point is treated as inside the set).
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; hue proportional to escape time outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set to a PIL RGB image.

    The viewport is centered on (figure_center_x, figure_center_y) and spans
    `figure_width` in the complex plane; height follows the aspect ratio.
    """
    # Local import keeps Pillow an optional dependency of this module.
    from PIL import Image  # type: ignore

    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
361
'''simple docstring''' def _UpperCamelCase ( UpperCamelCase__ = 4_0_0_0_0_0_0 ): UpperCAmelCase__ : List[str] = [0, 1] UpperCAmelCase__ : Any = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 UpperCAmelCase__ : str = 0 for j in range(len(UpperCamelCase__ ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f"""{solution() = }""")
283
0
"""Release helper: bump version strings across the repo and clean README links.

Fix: the obfuscation pass renamed every helper to the same colliding
identifier `_A` and the module constants to `lowerCAmelCase__`, while the
bodies call the canonical names (`update_version_in_file`, `get_version`,
`global_version_update`, ...) — NameError as written. Canonical names are
restored; behavior (patterns, file paths, messages) is unchanged.
"""
import argparse
import os
import re

import packaging.version

PATH_TO_EXAMPLES = "examples/"
# pattern -> (compiled regex locating the version string, replacement template)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files that carry the canonical version string.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update `check_min_version(...)` in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere; examples are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace `main` doc links with stable ones in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package `__init__.py`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Interactively pick the release version and apply it everywhere.

    Raises ValueError when asked for a patch release from a dev version.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Interactively bump to the next `.dev0` version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
104
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal

logger = logging.get_logger(__name__)

# TypeVar constrained to the two concrete dataset kinds; the name must match the
# `-> DatasetType` annotations below or importing this module raises NameError.
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def _validate_dataset_list(datasets) -> type:
    """Validate a non-empty sequence of datasets and return their common type.

    Every element must be a `Dataset` or an `IterableDataset`, and all elements
    must be of the same kind (map-style and iterable datasets cannot be mixed).

    Returns:
        The shared concrete type, `Dataset` or `IterableDataset`.

    Raises:
        ValueError: if an element is a (possibly empty) dataset dictionary, is
            not a dataset at all, or does not match the type of the first element.
    """
    dataset_type, other_type = None, None
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                # Report the splits of the *offending element*, not the whole input list.
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). "
                "Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    return dataset_type


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets of the same type into a single dataset.

    Args:
        datasets: list of `Dataset` or list of `IterableDataset` (not mixed).
        probabilities: optional sampling probabilities, one per dataset.
        seed: optional seed for the sampling of examples.
        info: optional `DatasetInfo` for the result.
        split: optional `NamedSplit` for the result.
        stopping_strategy: "first_exhausted" stops when the smallest dataset
            runs out; "all_exhausted" oversamples until all are consumed.

    Returns:
        A dataset of the same type as the inputs.

    Raises:
        ValueError: on an empty list, mixed/invalid element types, or an
            unknown ``stopping_strategy``.
    """
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    dataset_type = _validate_dataset_list(datasets)
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    return _interleave_iterable_datasets(
        datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
    )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets of the same type into a single dataset.

    Args:
        dsets: list of `Dataset` or list of `IterableDataset` (not mixed).
        info: optional `DatasetInfo` for the result.
        split: optional `NamedSplit` for the result.
        axis: 0 to stack rows, 1 to stack columns.

    Returns:
        A dataset of the same type as the inputs.

    Raises:
        ValueError: on an empty list or mixed/invalid element types.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    dataset_type = _validate_dataset_list(dsets)
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
176
0
import os
import sys

import transformers

# NOTE(review): in the original this constant is just assigned and never used —
# presumably a mangled `os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"`; kept verbatim
# so behavior is unchanged. TODO confirm against the upstream script.
A__ = "3"

# Core interpreter / transformers info is always printable.
print(f"Python version: {sys.version}")
print(f"transformers version: {transformers.__version__}")

# Each optional framework is probed independently; a missing one reports None
# instead of aborting the report.
try:
    import torch

    print(f"Torch version: {torch.__version__}")
    print(f"Cuda available: {torch.cuda.is_available()}")
    print(f"Cuda version: {torch.version.cuda}")
    print(f"CuDNN version: {torch.backends.cudnn.version()}")
    print(f"Number of GPUs available: {torch.cuda.device_count()}")
    print(f"NCCL version: {torch.cuda.nccl.version()}")
except ImportError:
    print(f"Torch version: {None}")

try:
    import deepspeed

    print(f"DeepSpeed version: {deepspeed.__version__}")
except ImportError:
    print(f"DeepSpeed version: {None}")

try:
    import tensorflow as tf

    print(f"TensorFlow version: {tf.__version__}")
    print(f"TF GPUs available: {bool(tf.config.list_physical_devices('GPU'))}")
    print(f"Number of TF GPUs available: {len(tf.config.list_physical_devices('GPU'))}")
except ImportError:
    print(f"TensorFlow version: {None}")
359
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class a : def __init__( self :str ,__lowercase :Optional[Any] ,__lowercase :List[Any]=1_3 ,__lowercase :str=7 ,__lowercase :Dict=True ,__lowercase :Any=True ,__lowercase :str=True ,__lowercase :Any=True ,__lowercase :Tuple=9_9 ,__lowercase :List[str]=3_2 ,__lowercase :int=5 ,__lowercase :Union[str, Any]=4 ,__lowercase :List[str]=4 ,__lowercase :Any="gelu" ,__lowercase :Any=0.0 ,__lowercase :Tuple=0.1 ,__lowercase :str=True ,__lowercase :Tuple=5_1_2 ,__lowercase :Dict=1_6 ,__lowercase :Tuple=2 ,__lowercase :List[str]=0.02 ,__lowercase :Dict=3 ,__lowercase :Optional[int]=4 ,__lowercase :Tuple=None ,): snake_case__ : Optional[int] = parent snake_case__ : Optional[Any] = batch_size snake_case__ : Optional[Any] = seq_length snake_case__ : Tuple = is_training snake_case__ : Optional[Any] = use_input_mask snake_case__ : List[Any] = use_token_type_ids snake_case__ : str = use_labels snake_case__ : List[Any] = vocab_size snake_case__ : Optional[int] = hidden_size snake_case__ : List[Any] = num_hidden_layers snake_case__ : str = num_attention_heads snake_case__ : int = intermediate_multiple_size snake_case__ : Tuple = hidden_act snake_case__ : Optional[Any] = hidden_dropout snake_case__ : str = attention_dropout snake_case__ : List[str] = weight_tying snake_case__ : Optional[Any] = max_position_embeddings snake_case__ : Optional[int] = type_vocab_size snake_case__ : str = type_sequence_label_size snake_case__ : 
Dict = initializer_range snake_case__ : int = num_labels snake_case__ : int = num_choices snake_case__ : int = scope def __lowerCamelCase ( self :List[str] ): snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case__ : str = None if self.use_input_mask: snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) snake_case__ : Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCamelCase ( self :int ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_multiple_size=self.intermediate_multiple_size ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,weight_tying=self.weight_tying ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,) def __lowerCamelCase ( self :str ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self.prepare_config_and_inputs() snake_case__ : Union[str, Any] = True return config, input_ids, input_mask, token_labels def __lowerCamelCase ( self :List[Any] ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Optional[Any] ): snake_case__ : Union[str, Any] = GPTNeoXJapaneseModel(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : Union[str, Any] = model(__lowercase ,attention_mask=__lowercase ) snake_case__ : Optional[Any] = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self :Any ,__lowercase :Union[str, Any] ,__lowercase :Tuple ,__lowercase :Union[str, Any] ): 
snake_case__ : Any = True snake_case__ : Tuple = GPTNeoXJapaneseModel(__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : str = model(__lowercase ,attention_mask=__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self :Any ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ,__lowercase :Any ): snake_case__ : Any = GPTNeoXJapaneseForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self :Optional[int] ,__lowercase :Any ,__lowercase :int ,__lowercase :List[str] ): snake_case__ : Optional[int] = True snake_case__ : Optional[int] = GPTNeoXJapaneseForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() # first forward pass snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,use_cache=__lowercase ) snake_case__ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case__ : Optional[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) snake_case__ : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and snake_case__ : Optional[int] = torch.cat([input_ids, next_tokens] ,dim=-1 ) snake_case__ : Optional[int] = torch.cat([input_mask, next_mask] ,dim=-1 ) snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,output_hidden_states=__lowercase ) snake_case__ : Tuple = output_from_no_past['''hidden_states'''][0] snake_case__ : List[str] = model( __lowercase ,attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0] # select random slice snake_case__ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item() snake_case__ : 
Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) ) def __lowerCamelCase ( self :Dict ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = config_and_inputs snake_case__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class a ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () __lowerCAmelCase : List[str] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () __lowerCAmelCase : int = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) __lowerCAmelCase : List[Any] = False __lowerCAmelCase : Tuple = False __lowerCAmelCase : Tuple = False __lowerCAmelCase : str = False def __lowerCamelCase ( self :Any ): snake_case__ : int = GPTNeoXJapaneseModelTester(self ) snake_case__ : Any = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 ) def __lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() def __lowerCamelCase ( self :str ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :Optional[Any] ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( 
self :Optional[Any] ): # This regression test was failing with PyTorch < 1.3 snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case__ : List[str] = None self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :Optional[int] ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :str ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*__lowercase ) @slow def __lowerCamelCase ( self :Dict ): snake_case__ : str = '''abeja/gpt-neox-japanese-2.7b''' snake_case__ : int = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、'''] snake_case__ : Optional[int] = [ '''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''', '''100年後に必要とされる会社は、「人」が中心の会社です。''', '''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''', '''国境の長いトンネルを抜けると、そこは雪国だった。''', '''美味しい日本食といえば、やっぱりお寿司ですよね。''', ] snake_case__ : Optional[int] = GPTNeoXJapaneseTokenizer.from_pretrained(__lowercase ) snake_case__ : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(__lowercase ) snake_case__ : Optional[int] = [] for prompt in prompts: snake_case__ : Dict = tokenizer(__lowercase ,return_tensors='''pt''' ).input_ids snake_case__ : Union[str, Any] = model.generate(__lowercase ,max_length=5_0 ) snake_case__ : int = tokenizer.batch_decode(__lowercase ,skip_special_tokens=__lowercase ) predicted_outputs += generated_string self.assertListEqual(__lowercase ,__lowercase )
44
0
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock_timeout(tmpdir):
    """Acquiring an already-held lock with a timeout raises Timeout after ~timeout s.

    The mangled original referenced undefined names (`tmpdir`, `locka`) and
    passed the tmpdir fixture where the `Timeout` class and the timeout value
    were expected; both locks were also assigned to the same variable.
    """
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            # Second lock on the same file must time out, not succeed.
            lock_b.acquire(timeout)
        # The Timeout must not fire before the requested delay elapsed.
        assert time.time() - _start > timeout


def test_long_filenames(tmpdir):
    """FileLock truncates over-long lock-file names to the filesystem limit (255)."""
    filename = "a" * 1000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    assert lock_a._lock_file.endswith(".lock")
    # The 1000-char basename must have been shortened away.
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            # Zero timeout: contention on the (truncated) same path fails immediately.
            lock_b.acquire(0)
198
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase ( a__ ): '''simple docstring''' def _lowerCAmelCase( self ) -> List[str]: lowercase__ : Union[str, Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCAmelCase , '''width_multiplier''' ) ) class UpperCAmelCase : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=64 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase="swish" , __lowerCAmelCase=3 , __lowerCAmelCase=32 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=10 , __lowerCAmelCase=None , __lowerCAmelCase=0.2_5 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , ) -> List[Any]: lowercase__ : List[str] = parent lowercase__ : List[Any] = batch_size lowercase__ : List[str] = image_size lowercase__ : Optional[int] = patch_size lowercase__ : Tuple = num_channels lowercase__ : List[str] = make_divisible(512 * width_multiplier , divisor=8 ) lowercase__ : Optional[int] = hidden_act lowercase__ : List[Any] = conv_kernel_size lowercase__ : Dict = output_stride lowercase__ : List[Any] = 
classifier_dropout_prob lowercase__ : str = use_labels lowercase__ : List[Any] = is_training lowercase__ : Tuple = num_labels lowercase__ : Optional[int] = initializer_range lowercase__ : Tuple = scope lowercase__ : List[Any] = width_multiplier lowercase__ : Optional[int] = ffn_dropout lowercase__ : int = attn_dropout def _lowerCAmelCase( self ) -> Optional[int]: lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Any = None lowercase__ : Tuple = None if self.use_labels: lowercase__ : str = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowercase__ : Dict = self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCAmelCase( self ) -> Tuple: return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: lowercase__ : Optional[int] = MobileViTVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__ : str = model(__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: lowercase__ : Optional[Any] = self.num_labels lowercase__ : Dict = MobileViTVaForImageClassification(__lowerCAmelCase ) 
model.to(__lowerCAmelCase ) model.eval() lowercase__ : Optional[Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: lowercase__ : str = self.num_labels lowercase__ : List[Any] = MobileViTVaForSemanticSegmentation(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__ : int = model(__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowercase__ : Union[str, Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCAmelCase( self ) -> int: lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs lowercase__ : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( a__ , a__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _lowerCAmelCase( self ) -> int: lowercase__ : Tuple = MobileViTVaModelTester(self ) lowercase__ : Any = MobileViTVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) 
def _lowerCAmelCase( self ) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' ) def _lowerCAmelCase( self ) -> str: pass @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' ) def _lowerCAmelCase( self ) -> Optional[Any]: pass @unittest.skip(reason='''MobileViTV2 does not output attentions''' ) def _lowerCAmelCase( self ) -> Optional[int]: pass @require_torch_multi_gpu @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' ) def _lowerCAmelCase( self ) -> Any: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowerCAmelCase( self ) -> str: pass def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(__lowerCAmelCase ) lowercase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : str = [*signature.parameters.keys()] lowercase__ : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def _lowerCAmelCase( self ) -> str: lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowerCAmelCase( self ) -> Union[str, Any]: def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): lowercase__ : Optional[int] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) lowercase__ : List[Any] = outputs.hidden_states lowercase__ : Optional[int] = 5 self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) # MobileViTV2's feature maps are of shape 
(batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowercase__ : str = 2 for i in range(len(__lowerCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Tuple = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def _lowerCAmelCase( self ) -> List[Any]: lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) def _lowerCAmelCase( self ) -> Dict: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase ) @slow def _lowerCAmelCase( self ) -> List[str]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Dict = MobileViTVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def __UpperCamelCase ( ): lowercase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase( self ) -> int: return ( MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ) if is_vision_available() else None ) @slow def _lowerCAmelCase( self ) -> List[Any]: lowercase__ : Dict = 
MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to( __lowerCAmelCase ) lowercase__ : List[Any] = self.default_image_processor lowercase__ : Optional[int] = prepare_img() lowercase__ : int = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase__ : List[str] = model(**__lowerCAmelCase ) # verify the logits lowercase__ : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) lowercase__ : int = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def _lowerCAmelCase( self ) -> Optional[int]: lowercase__ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase__ : int = model.to(__lowerCAmelCase ) lowercase__ : Any = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase__ : str = prepare_img() lowercase__ : Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase__ : str = model(**__lowerCAmelCase ) lowercase__ : Tuple = outputs.logits # verify the logits lowercase__ : List[Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __lowerCAmelCase ) lowercase__ : Union[str, Any] = torch.tensor( [ [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]], [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]], [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]], ] , device=__lowerCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def _lowerCAmelCase( 
self ) -> Any: lowercase__ : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase__ : List[str] = model.to(__lowerCAmelCase ) lowercase__ : Optional[int] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase__ : int = prepare_img() lowercase__ : List[str] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase__ : Optional[Any] = model(**__lowerCAmelCase ) lowercase__ : Optional[int] = outputs.logits.detach().cpu() lowercase__ : Any = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase , target_sizes=[(50, 60)] ) lowercase__ : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __lowerCAmelCase ) lowercase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase ) lowercase__ : Union[str, Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
198
1
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : str = '▁' __A : Union[str, Any] = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'} __A : List[str] = { 'sentencepiece_model_file': 'sentencepiece.bpe.model', 'vocab_file': 'vocab.txt', } __A : Union[str, Any] = { 'vocab_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', }, 'sentencepiece_model_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', }, } __A : Tuple = { 'ernie-m-base': 514, 'ernie-m-large': 514, } __A : Union[str, Any] = { 'ernie-m-base': {'do_lower_case': False}, 'ernie-m-large': {'do_lower_case': False}, } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : str = ['input_ids'] lowercase : Optional[Any] = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Any = RESOURCE_FILES_NAMES def __init__( self :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str]=None ,_UpperCamelCase :str=False ,_UpperCamelCase :str="utf8" ,_UpperCamelCase :Optional[Any]="[UNK]" ,_UpperCamelCase :Tuple="[SEP]" ,_UpperCamelCase :Any="[PAD]" ,_UpperCamelCase :Optional[int]="[CLS]" ,_UpperCamelCase :Any="[MASK]" ,_UpperCamelCase :Optional[Dict[str, Any]] = None ,**_UpperCamelCase :List[Any] ,): # Mask token behave like a normal word, i.e. 
include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. snake_case_ : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,pad_token=__snake_case ,cls_token=__snake_case ,mask_token=__snake_case ,vocab_file=__snake_case ,encoding=__snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**__snake_case ,) snake_case_ : List[str] = do_lower_case snake_case_ : Union[str, Any] = sentencepiece_model_ckpt snake_case_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__snake_case ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: snake_case_ : Union[str, Any] = self.load_vocab(filepath=__snake_case ) else: snake_case_ : Tuple = {self.sp_model.id_to_piece(__snake_case ): id for id in range(self.sp_model.get_piece_size() )} snake_case_ : Dict = {v: k for k, v in self.vocab.items()} def a__ ( self :Optional[Any] ,_UpperCamelCase :Tuple ): if text is None: return None snake_case_ : List[Any] = self.tokenize(__snake_case ) snake_case_ : Union[str, Any] = '', [] for i, ch in enumerate(__snake_case ): if ch in self.SP_CHAR_MAPPING: snake_case_ : List[str] = self.SP_CHAR_MAPPING.get(__snake_case ) else: snake_case_ : Tuple = unicodedata.normalize("""NFKC""" ,__snake_case ) if self.is_whitespace(__snake_case ): continue normalized_text += ch char_mapping.extend([i] * len(__snake_case ) ) snake_case_ : Optional[Any] = normalized_text, [], 0 if self.do_lower_case: snake_case_ : List[Any] = text.lower() for token in split_tokens: if token[:1] == "▁": snake_case_ : Tuple = token[1:] snake_case_ : str = text[offset:].index(__snake_case ) + offset snake_case_ : str = start + len(__snake_case ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) snake_case_ : Dict = end return token_mapping @property def a__ ( self :int ): return 
len(self.vocab ) def a__ ( self :Tuple ): return dict(self.vocab ,**self.added_tokens_encoder ) def __getstate__( self :List[Any] ): snake_case_ : Optional[Any] = self.__dict__.copy() snake_case_ : Union[str, Any] = None return state def __setstate__( self :int ,_UpperCamelCase :Dict ): snake_case_ : Optional[int] = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): snake_case_ : Optional[int] = {} snake_case_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def a__ ( self :int ,_UpperCamelCase :Any ): return "".join((self.SP_CHAR_MAPPING.get(__snake_case ,__snake_case ) for c in text) ) def a__ ( self :Any ,_UpperCamelCase :List[Any] ,_UpperCamelCase :int=False ,_UpperCamelCase :Optional[int]=6_4 ,_UpperCamelCase :List[Any]=0.1 ): if self.sp_model_kwargs.get("""enable_sampling""" ) is True: snake_case_ : Dict = True if self.sp_model_kwargs.get("""alpha""" ) is not None: snake_case_ : Union[str, Any] = self.sp_model_kwargs.get("""alpha""" ) if self.sp_model_kwargs.get("""nbest_size""" ) is not None: snake_case_ : str = self.sp_model_kwargs.get("""nbest_size""" ) if not enable_sampling: snake_case_ : Any = self.sp_model.EncodeAsPieces(__snake_case ) else: snake_case_ : List[Any] = self.sp_model.SampleEncodeAsPieces(__snake_case ,__snake_case ,__snake_case ) snake_case_ : str = [] for pi, piece in enumerate(__snake_case ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__snake_case ) and pi != 0: new_pieces.append(__snake_case ) continue else: continue snake_case_ : Optional[Any] = 0 for i, chunk in enumerate(__snake_case ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__snake_case ) or self.is_punct(__snake_case ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__snake_case ) snake_case_ : int = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and 
piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) snake_case_ : List[Any] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) snake_case_ : int = i if len(__snake_case ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def a__ ( self :List[Any] ,_UpperCamelCase :Dict ): snake_case_ : List[Any] = ''.join(__snake_case ).replace(__snake_case ,""" """ ).strip() return out_string def a__ ( self :Any ,_UpperCamelCase :List[str] ): snake_case_ : str = self.convert_ids_to_tokens(__snake_case ) snake_case_ : Dict = ''.join(__snake_case ).replace(__snake_case ,""" """ ).strip() return out_string def a__ ( self :List[str] ,_UpperCamelCase :Tuple ): return self.vocab.get(__snake_case ,self.vocab.get(self.unk_token ) ) def a__ ( self :Optional[int] ,_UpperCamelCase :int ): return self.reverse_vocab.get(__snake_case ,self.unk_token ) def a__ ( self :str ,_UpperCamelCase :List[str] ,_UpperCamelCase :Union[str, Any]=None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ : Tuple = [self.cls_token_id] snake_case_ : Optional[Any] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def a__ ( self :Union[str, Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Dict=None ): if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def a__ ( self :Dict ,_UpperCamelCase :List[Any] ,_UpperCamelCase :int=None ,_UpperCamelCase :Any=False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] 
+ ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1] return [1] + ([0] * len(__snake_case )) + [1] def a__ ( self :List[Any] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(__snake_case ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__snake_case ) + 1) + [1] * (len(__snake_case ) + 3) def a__ ( self :Dict ,_UpperCamelCase :List[str] ): if "\u4e00" <= char <= "\u9fff": return True return False def a__ ( self :Dict ,_UpperCamelCase :Union[str, Any] ): if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def a__ ( self :Any ,_UpperCamelCase :List[Any] ): if char in ",;:.?!~,;:。?!《》【】": return True return False def a__ ( self :Dict ,_UpperCamelCase :str ): if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__snake_case ) == 1: snake_case_ : Optional[Any] = unicodedata.category(__snake_case ) if cat == "Zs": return True return False def a__ ( self :Union[str, Any] ,_UpperCamelCase :Optional[Any] ): snake_case_ : str = {} with io.open(__snake_case ,"""r""" ,encoding="""utf-8""" ) as f: for index, line in enumerate(__snake_case ): snake_case_ : int = line.rstrip("""\n""" ) snake_case_ : Optional[int] = int(__snake_case ) return token_to_idx def a__ ( self :List[Any] ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ): snake_case_ : Optional[int] = 0 if os.path.isdir(__snake_case ): snake_case_ : Optional[int] = os.path.join( __snake_case ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) else: snake_case_ : Any = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(__snake_case ,"""w""" ,encoding="""utf-8""" ) as writer: for token, token_index in sorted(self.vocab.items() ,key=lambda _UpperCamelCase : kv[1] ): if index != 
token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' """ Please check that the vocabulary is not corrupted!""" ) snake_case_ : Optional[int] = token_index writer.write(token + """\n""" ) index += 1 snake_case_ : Any = os.path.join(__snake_case ,"""sentencepiece.bpe.model""" ) with open(__snake_case ,"""wb""" ) as fi: snake_case_ : List[Any] = self.sp_model.serialized_model_proto() fi.write(__snake_case ) return (vocab_file,)
357
"""Minimum edit (Levenshtein) distance via memoized recursion."""
import functools


def UpperCAmelCase(worda: str, wordb: str) -> int:
    """Return the minimum number of single-character insertions, deletions,
    and substitutions needed to turn ``worda`` into ``wordb``.

    The original collapsed source declared both parameters (and both inner
    indices) with the same name, which is a SyntaxError and also made the
    mismatch test compare a word against itself; distinct names restore the
    intended algorithm.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache  # memoize the O(len_worda * len_wordb) subproblems
    def min_distance(indexa: int, indexb: int) -> int:
        # First word exhausted: insert the rest of the second word.
        if indexa >= len_worda:
            return len_wordb - indexb
        # Second word exhausted: delete the rest of the first word.
        if indexb >= len_wordb:
            return len_worda - indexa
        # 1 if the current letters differ (substitution cost), else 0.
        diff = int(worda[indexa] != wordb[indexb])
        return min(
            1 + min_distance(indexa + 1, indexb),       # delete from worda
            1 + min_distance(indexa, indexb + 1),       # insert into worda
            diff + min_distance(indexa + 1, indexb + 1) # substitute / keep
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
8
0
"""Principal Component Analysis (PCA) and Linear Discriminant Analysis (LDA).

Data layout convention throughout: ``features`` is (variables x samples), i.e.
each *column* is one observation, so class selection indexes columns and means
are taken along axis 1.

The collapsed source defined every function under the same obfuscated name
``UpperCAmelCase`` (each definition clobbering the previous one) while the
bodies and the pytest checks called ``column_reshape``,
``covariance_within_classes`` etc., which were therefore undefined.  The real
names are restored here so the module actually runs.
"""
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")

# Public API; the pytest check functions are intentionally excluded.
__all__ = [
    "column_reshape",
    "covariance_within_classes",
    "covariance_between_classes",
    "principal_component_analysis",
    "linear_discriminant_analysis",
]


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single-column 2-D array of shape (n, 1)."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Within-class covariance: average scatter of each class around its own mean."""
    covariance_sum = None  # accumulated scatter; None until the first class is processed
    for i in range(classes):
        data = features[:, labels == i]  # columns (samples) belonging to class i
        centered_data = data - column_reshape(data.mean(1))
        if covariance_sum is None:
            covariance_sum = np.dot(centered_data, centered_data.T)
        else:
            covariance_sum += np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class covariance: scatter of the class means around the global mean,
    weighted by class size."""
    general_data_mean = features.mean(1)
    covariance_sum = None
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        mean_diff = column_reshape(data.mean(1)) - column_reshape(general_data_mean)
        scatter = device_data * np.dot(mean_diff, mean_diff.T)
        if covariance_sum is None:
            covariance_sum = scatter
        else:
            covariance_sum += scatter
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project ``features`` onto its top ``dimensions`` principal components.

    Returns the projected data of shape (dimensions, n_samples).
    Raises AssertionError on an empty dataset (kept from the original contract;
    the tests below rely on it).
    """
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # eigh returns eigenvalues in ascending order; reverse to take the largest first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
    logging.error("Dataset empty")
    raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Supervised projection maximizing between-class over within-class scatter.

    Solves the generalized eigenproblem S_b v = lambda S_w v, then orthonormalizes
    the selected eigenvectors via SVD before projecting.
    """
    # Cannot extract more discriminant directions than classes allow.
    assert classes > dimensions
    # FIX: the source tested the bound method `features.any` (always truthy)
    # instead of calling it, so the empty-dataset branch was unreachable.
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
    logging.error("Dataset empty")
    raise AssertionError


def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions >= classes."""
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    """PCA output that deviates from the reference projection raises AssertionError."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class lowerCAmelCase__ : def __init__( self ): """simple docstring""" lowercase_ : int = {} def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : Dict = {} def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" if nodea not in self.connections: self.add_node(__SCREAMING_SNAKE_CASE ) if nodea not in self.connections: self.add_node(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = probability def _snake_case ( self ): """simple docstring""" return list(self.connections ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : Any = 0 lowercase_ : Tuple = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : list[tuple[str, str, float]] , __SCREAMING_SNAKE_CASE : int ): """simple docstring""" lowercase_ : List[Any] = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : str = Counter(graph.get_nodes() ) lowercase_ : Any = start for _ in range(__SCREAMING_SNAKE_CASE ): lowercase_ : int = graph.transition(__SCREAMING_SNAKE_CASE ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
93
0
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCAmelCase_ ( unittest.TestCase ): UpperCAmelCase__ : Union[str, Any] = JukeboxTokenizer UpperCAmelCase__ : Optional[int] = { "artist": "Zac Brown Band", "genres": "Country", "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ", } @require_torch def snake_case_ ( self ) -> Optional[Any]: import torch UpperCamelCase : Tuple = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) UpperCamelCase : List[str] = tokenizer(**self.metas )['input_ids'] # fmt: off UpperCamelCase : Dict = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 
76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 
76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) ) @require_torch def snake_case_ ( self ) -> Optional[Any]: import torch UpperCamelCase : str = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) UpperCamelCase : Dict = tokenizer(**self.metas )['input_ids'] # fmt: off UpperCamelCase : Optional[int] = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 
44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) ) 
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
103
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : int = ["input_values", "attention_mask"] def __init__( self, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 1_6000, SCREAMING_SNAKE_CASE_ = 0.0, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = 80, SCREAMING_SNAKE_CASE_ = 16, SCREAMING_SNAKE_CASE_ = 64, SCREAMING_SNAKE_CASE_ = "hann_window", SCREAMING_SNAKE_CASE_ = 1.0, SCREAMING_SNAKE_CASE_ = 80, SCREAMING_SNAKE_CASE_ = 7600, SCREAMING_SNAKE_CASE_ = 1e-10, SCREAMING_SNAKE_CASE_ = 2, SCREAMING_SNAKE_CASE_ = True, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]: super().__init__(feature_size=SCREAMING_SNAKE_CASE_, sampling_rate=SCREAMING_SNAKE_CASE_, padding_value=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = do_normalize UpperCamelCase : Optional[Any] = return_attention_mask UpperCamelCase : Union[str, Any] = num_mel_bins UpperCamelCase : int = hop_length UpperCamelCase : Any = win_length UpperCamelCase : Dict = win_function UpperCamelCase : Any = frame_signal_scale UpperCamelCase : str = fmin UpperCamelCase : int = fmax UpperCamelCase : Dict = mel_floor UpperCamelCase : Any = reduction_factor UpperCamelCase : List[str] = win_length * sampling_rate // 1000 UpperCamelCase : Union[str, Any] = hop_length * sampling_rate // 1000 UpperCamelCase : Tuple = optimal_fft_length(self.sample_size ) UpperCamelCase : int = (self.n_fft // 2) + 1 UpperCamelCase : Any = window_function(window_length=self.sample_size, name=self.win_function, periodic=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = mel_filter_bank( 
num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm='slaney', mel_scale='slaney', ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers', SCREAMING_SNAKE_CASE_, ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers', SCREAMING_SNAKE_CASE_, ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: UpperCamelCase : Dict = np.array(SCREAMING_SNAKE_CASE_, np.intaa ) UpperCamelCase : int = [] for vector, length in zip(SCREAMING_SNAKE_CASE_, attention_mask.sum(-1 ) ): UpperCamelCase : Any = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: UpperCamelCase : Optional[Any] = padding_value normed_input_values.append(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : str = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def snake_case_ ( self, SCREAMING_SNAKE_CASE_, ) -> np.ndarray: UpperCamelCase : int = spectrogram( SCREAMING_SNAKE_CASE_, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel='log10', ) return log_mel_spec.T def __call__( self, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> BatchFeature: 
if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: UpperCamelCase : Dict = self._process_audio( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, ) else: UpperCamelCase : str = None if audio_target is not None: UpperCamelCase : str = self._process_audio( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, ) if inputs is None: return inputs_target else: UpperCamelCase : Dict = inputs_target['input_values'] UpperCamelCase : str = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: UpperCamelCase : str = decoder_attention_mask return inputs def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> BatchFeature: UpperCamelCase : Any = isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio 
is supported for input to {self}""" ) UpperCamelCase : Any = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ) and (isinstance(speech[0], (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_, dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ): UpperCamelCase : int = np.asarray(SCREAMING_SNAKE_CASE_, dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ) and speech.dtype is np.dtype(np.floataa ): UpperCamelCase : Any = speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase : str = [speech] # needed to make pad() work on spectrogram inputs UpperCamelCase : Optional[Any] = self.feature_size # convert into correct format for padding if is_target: UpperCamelCase : Optional[int] = [self._extract_mel_features(SCREAMING_SNAKE_CASE_ ) for waveform in speech] UpperCamelCase : Union[str, Any] = BatchFeature({'input_values': features} ) UpperCamelCase : List[str] = self.num_mel_bins else: UpperCamelCase : Dict = BatchFeature({'input_values': speech} ) UpperCamelCase : Tuple = self.pad( SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, pad_to_multiple_of=SCREAMING_SNAKE_CASE_, return_attention_mask=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, ) UpperCamelCase : int = feature_size_hack # convert input values to correct format UpperCamelCase : Optional[int] = padded_inputs['input_values'] if not isinstance(input_values[0], np.ndarray ): UpperCamelCase : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE_, dtype=np.floataa ) for array in input_values] elif ( not isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ) and isinstance(input_values[0], np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): UpperCamelCase : Optional[int] = [array.astype(np.floataa ) for array in input_values] elif isinstance(SCREAMING_SNAKE_CASE_, 
np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): UpperCamelCase : Dict = input_values.astype(np.floataa ) # convert attention_mask to correct format UpperCamelCase : Dict = padded_inputs.get('attention_mask' ) if attention_mask is not None: UpperCamelCase : int = [np.asarray(SCREAMING_SNAKE_CASE_, dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: UpperCamelCase : Dict = ( attention_mask if self._get_padding_strategies(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD else None ) UpperCamelCase : Tuple = self.zero_mean_unit_var_norm( padded_inputs['input_values'], attention_mask=SCREAMING_SNAKE_CASE_, padding_value=self.padding_value ) if return_tensors is not None: UpperCamelCase : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ ) return padded_inputs def snake_case_ ( self ) -> Dict[str, Any]: UpperCamelCase : Any = super().to_dict() # Don't serialize these as they are derived from the other properties. UpperCamelCase : Any = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
103
1
"""simple docstring""" from __future__ import annotations def UpperCamelCase ( _lowerCAmelCase : list[int], _lowerCAmelCase : int ) -> bool: if len(_lowerCAmelCase ) == 0: return False _UpperCAmelCase : Dict = len(_lowerCAmelCase ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint], _lowerCAmelCase ) else: return binary_search(a_list[midpoint + 1 :], _lowerCAmelCase ) if __name__ == "__main__": lowerCamelCase__ : int = input('''Enter numbers separated by comma:\n''').strip() lowerCamelCase__ : int = [int(item.strip()) for item in user_input.split(''',''')] lowerCamelCase__ : List[str] = int(input('''Enter the number to be found in the list:\n''').strip()) lowerCamelCase__ : Dict = '''''' if binary_search(sequence, target) else '''not ''' print(F'''{target} was {not_str}found in {sequence}''')
246
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def UpperCamelCase ( _lowerCAmelCase : List[str] ) -> Any: _UpperCAmelCase : List[str] = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """_float_tensor""", """decoder.output_projection.weight""", ] for k in ignore_keys: state_dict.pop(_lowerCAmelCase, _lowerCAmelCase ) def UpperCamelCase ( _lowerCAmelCase : Optional[int] ) -> Optional[int]: _UpperCAmelCase , _UpperCAmelCase : str = emb.weight.shape _UpperCAmelCase : Union[str, Any] = nn.Linear(_lowerCAmelCase, _lowerCAmelCase, bias=_lowerCAmelCase ) _UpperCAmelCase : List[str] = emb.weight.data return lin_layer def UpperCamelCase ( _lowerCAmelCase : str, _lowerCAmelCase : Dict="facebook/mbart-large-en-ro", _lowerCAmelCase : Optional[int]=False, _lowerCAmelCase : Union[str, Any]=False ) -> Optional[Any]: _UpperCAmelCase : List[Any] = torch.load(_lowerCAmelCase, map_location="""cpu""" )["""model"""] remove_ignore_keys_(_lowerCAmelCase ) _UpperCAmelCase : int = state_dict["""encoder.embed_tokens.weight"""].shape[0] _UpperCAmelCase : str = MBartConfig.from_pretrained(_lowerCAmelCase, vocab_size=_lowerCAmelCase ) if mbart_aa and finetuned: _UpperCAmelCase : Any = """relu""" _UpperCAmelCase : Union[str, Any] = state_dict["""decoder.embed_tokens.weight"""] _UpperCAmelCase : Any = MBartForConditionalGeneration(_lowerCAmelCase ) model.model.load_state_dict(_lowerCAmelCase ) if finetuned: _UpperCAmelCase : List[Any] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCamelCase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') 
parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCamelCase__ : str = parser.parse_args() lowerCamelCase__ : Optional[int] = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
246
1
# Longformer model configuration and ONNX export configuration.
#
# NOTE(review): this chunk looks mechanically obfuscated and does not run
# as-is:
#   * both classes are named `__A` (the second shadows the first) and inherit
#     from the undefined `__lowerCamelCase` — presumably PretrainedConfig and
#     OnnxConfig, which ARE imported above; confirm against the original file,
#   * each `__init__` repeats the parameter name `SCREAMING_SNAKE_CASE_`
#     (duplicate parameter names are a SyntaxError), while the bodies read
#     descriptive names (attention_window, vocab_size, ...) that are never
#     bound,
#   * assignments target the throwaway name `UpperCamelCase__` instead of
#     `self.<attr>` / locals,
#   * the second top-level `lowerCamelCase_` binding (the URL map) shadows the
#     logger bound just above it.
# Comments below describe the evident intent; no executable token is changed.
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging

if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase

# Module logger (immediately shadowed by the dict below — see NOTE above).
lowerCamelCase_ = logging.get_logger(__name__)

# Pretrained checkpoint name -> config-file URL map.
lowerCamelCase_ = {
    '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
    '''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
    '''allenai/longformer-large-4096-finetuned-triviaqa''': (
        '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
    ),
    '''allenai/longformer-base-4096-extra.pos.embd.only''': (
        '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
    ),
    '''allenai/longformer-large-4096-extra.pos.embd.only''': (
        '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
    ),
}


class __A( __lowerCamelCase ):
    """Longformer model configuration."""

    # model_type identifier used by the config machinery.
    SCREAMING_SNAKE_CASE__ = """longformer"""

    # Defaults in order (inferred from the assignment order below — confirm):
    # attention_window=512, sep_token_id=2, pad_token_id=1, bos_token_id=0,
    # eos_token_id=2, vocab_size=30522, hidden_size=768, num_hidden_layers=12,
    # num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
    # hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
    # max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02,
    # layer_norm_eps=1e-12, onnx_export=False.
    def __init__(self , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 3_05_22 , SCREAMING_SNAKE_CASE_ = 7_68 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 30_72 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 1E-12 , SCREAMING_SNAKE_CASE_ = False , **SCREAMING_SNAKE_CASE_ , ):
        super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = attention_window
        UpperCamelCase__ = sep_token_id
        UpperCamelCase__ = bos_token_id
        UpperCamelCase__ = eos_token_id
        UpperCamelCase__ = vocab_size
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = max_position_embeddings
        UpperCamelCase__ = type_vocab_size
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = layer_norm_eps
        UpperCamelCase__ = onnx_export


class __A( __lowerCamelCase ):
    """ONNX export configuration for Longformer (inputs/outputs/dummy inputs)."""

    def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "default" , SCREAMING_SNAKE_CASE_ = None ):
        super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Flag presumably consumed to switch to ONNX-exportable code paths.
        UpperCamelCase__ = True

    # NOTE(review): the five members below are all named `UpperCAmelCase_`, so
    # at class-creation time only the last survives; they correspond to
    # inputs / outputs / atol_for_validation / default_onnx_opset /
    # generate_dummy_inputs in the original — confirm.
    @property
    def UpperCAmelCase_ (self ):
        # ONNX graph inputs with their dynamic axes.
        if self.task == "multiple-choice":
            UpperCamelCase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            UpperCamelCase__ = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""global_attention_mask""", dynamic_axis),
            ]
        )

    @property
    def UpperCAmelCase_ (self ):
        # ONNX graph outputs; default task gets a batch-only dynamic axis.
        UpperCamelCase__ = super().outputs
        if self.task == "default":
            UpperCamelCase__ = {0: """batch"""}
        return outputs

    @property
    def UpperCAmelCase_ (self ):
        # Absolute tolerance used when validating the exported model.
        return 1E-4

    @property
    def UpperCAmelCase_ (self ):
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ):
        # Build tokenizer dummy inputs, then add a global attention mask.
        UpperCamelCase__ = super().generate_dummy_inputs(
            preprocessor=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
        import torch

        # for some reason, replacing this code by
        # inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        UpperCamelCase__ = torch.zeros_like(inputs["""input_ids"""] )
        # make every second token global
        UpperCamelCase__ = 1
        return inputs
178
"""Lazy import structure for Wav2Vec2 with language-model decoding."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Submodule -> exported public names; consumed by _LazyModule below.
# Fix: this dict must be bound to `_import_structure`, which the _LazyModule
# call reads — the original bound it to a throwaway name and raised NameError
# at import time.
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    # NOTE(review): the module is spelled `processing_wavaveca_with_lm` here
    # but `processing_wav2vec2_with_lm` in the lazy map above — confirm the
    # real sibling file name.
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    # Fix: the lazy module must replace this module in sys.modules; the
    # original assigned it to a throwaway name, leaving the module inert.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
178
1
# ESM (Evolutionary Scale Modeling) configuration, including the ESMFold
# folding-model sub-configurations.
#
# NOTE(review): this chunk looks mechanically obfuscated and does not run
# as-is: the four classes are all named `snake_case_` (later definitions
# shadow earlier ones); the config class inherits from the undefined `__A`
# (presumably PretrainedConfig, imported above — confirm); every `__init__`
# parameter repeats the name `lowercase_` (duplicate parameter names are a
# SyntaxError); dataclass fields are all named `__A`; assignments target the
# throwaway `lowercase__` while bodies read descriptive names (vocab_size,
# esmfold_config, ...) that are never bound; the logger is bound to
# `UpperCamelCase` but referenced as `logger`; `get_default_vocab_list` is
# referenced while the function at the bottom is named `lowercase_`.
# Comments below describe the evident intent; no executable token is changed.
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module logger (referenced below as `logger` — see NOTE above).
UpperCamelCase = logging.get_logger(__name__)

# TODO Update this
# Checkpoint -> config URL map (shadows the logger binding above).
UpperCamelCase = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class snake_case_ ( __A ):
    """ESM model configuration."""

    # model_type identifier.
    __A : Tuple = "esm"

    # Parameter order (inferred from the assignments below — confirm):
    # vocab_size, mask_token_id, pad_token_id, hidden_size=768,
    # num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
    # hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
    # max_position_embeddings=1026, initializer_range=0.02,
    # layer_norm_eps=1e-12, position_embedding_type="absolute",
    # use_cache=True, emb_layer_norm_before=None, token_dropout=False,
    # is_folding_model=False, esmfold_config=None, vocab_list=None.
    def __init__( self : Optional[int] , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : Optional[int]=7_68 , lowercase_ : Tuple=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : List[Any]=30_72 , lowercase_ : Optional[int]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : List[Any]=10_26 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : str=1E-12 , lowercase_ : Optional[Any]="absolute" , lowercase_ : str=True , lowercase_ : Any=None , lowercase_ : int=False , lowercase_ : int=False , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[int] , ) -> Dict:
        super().__init__(pad_token_id=lowercase_ , mask_token_id=lowercase_ , **lowercase_ )
        lowercase__ : List[Any] = vocab_size
        lowercase__ : Optional[Any] = hidden_size
        lowercase__ : List[Any] = num_hidden_layers
        lowercase__ : List[str] = num_attention_heads
        lowercase__ : Optional[int] = intermediate_size
        lowercase__ : List[Any] = hidden_dropout_prob
        lowercase__ : str = attention_probs_dropout_prob
        lowercase__ : List[str] = max_position_embeddings
        lowercase__ : Tuple = initializer_range
        lowercase__ : str = layer_norm_eps
        lowercase__ : Tuple = position_embedding_type
        lowercase__ : str = use_cache
        lowercase__ : Dict = emb_layer_norm_before
        lowercase__ : Tuple = token_dropout
        lowercase__ : Optional[int] = is_folding_model
        # Folding models get an EsmFoldConfig and a vocab list; plain ESM
        # models get neither.
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                lowercase__ : Optional[Any] = EsmFoldConfig()
            elif isinstance(lowercase_ , lowercase_ ):
                lowercase__ : List[str] = EsmFoldConfig(**lowercase_ )
            lowercase__ : Optional[Any] = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                lowercase__ : List[Any] = get_default_vocab_list()
            else:
                lowercase__ : Union[str, Any] = vocab_list
        else:
            lowercase__ : Optional[int] = None
            lowercase__ : str = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowercase_ ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def __UpperCamelCase ( self : Any ) -> str:
        # Serialize to dict, expanding the nested esmfold_config.
        lowercase__ : Optional[int] = super().to_dict()
        if isinstance(self.esmfold_config , lowercase_ ):
            lowercase__ : Optional[Any] = self.esmfold_config.to_dict()
        return output


@dataclass
class snake_case_ :
    """ESMFold head configuration (originally EsmFoldConfig)."""

    # NOTE(review): all fields share the name `__A`; in the original these
    # were presumably esm_type, fp16_esm, use_esm_attn_map,
    # esm_ablate_pairwise, esm_ablate_sequence, esm_input_dropout, embed_aa,
    # bypass_lm, lddt_head_hid_dim and trunk — confirm.
    __A : str = None
    __A : bool = True
    __A : bool = False
    __A : bool = False
    __A : bool = False
    __A : float = 0
    __A : bool = True
    __A : bool = False
    __A : int = 128
    __A : "TrunkConfig" = None

    def __UpperCamelCase ( self : Dict ) -> List[str]:
        # Post-init-style hook: default / coerce the nested trunk config.
        if self.trunk is None:
            lowercase__ : Dict = TrunkConfig()
        elif isinstance(self.trunk , lowercase_ ):
            lowercase__ : str = TrunkConfig(**self.trunk )

    def __UpperCamelCase ( self : List[Any] ) -> int:
        # Serialize to dict, expanding the nested trunk config.
        lowercase__ : Optional[Any] = asdict(self )
        lowercase__ : Union[str, Any] = self.trunk.to_dict()
        return output


@dataclass
class snake_case_ :
    """Folding-trunk configuration (originally TrunkConfig)."""

    # Fields in order (inferred from the validation below — confirm):
    # num_blocks=48, sequence_state_dim=1024, pairwise_state_dim=128,
    # sequence_head_width=32, pairwise_head_width=32, position_bins=32,
    # dropout=0, layer_drop=0, cpu_grad_checkpoint=False, max_recycles=4,
    # chunk_size=128, structure_module=None.
    __A : int = 48
    __A : int = 1024
    __A : int = 128
    __A : int = 32
    __A : int = 32
    __A : int = 32
    __A : float = 0
    __A : float = 0
    __A : bool = False
    __A : int = 4
    __A : Optional[int] = 128
    __A : "StructureModuleConfig" = None

    def __UpperCamelCase ( self : Dict ) -> List[str]:
        # Post-init-style validation of dimensions and dropout.
        if self.structure_module is None:
            lowercase__ : int = StructureModuleConfig()
        elif isinstance(self.structure_module , lowercase_ ):
            lowercase__ : Dict = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        # NOTE(review): `x % x` is always 0, so the next two checks can never
        # fire — they look like they were meant to divide by the head widths;
        # confirm upstream intent before changing.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        lowercase__ : List[str] = self.sequence_state_dim // self.sequence_head_width
        lowercase__ : List[str] = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )

    def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
        # Serialize to dict, expanding the nested structure_module config.
        lowercase__ : Optional[int] = asdict(self )
        lowercase__ : List[str] = self.structure_module.to_dict()
        return output


@dataclass
class snake_case_ :
    """Structure-module configuration (originally StructureModuleConfig)."""

    __A : int = 384
    __A : int = 128
    __A : int = 16
    __A : int = 128
    __A : int = 12
    __A : int = 4
    __A : int = 8
    __A : float = 0.1
    __A : int = 8
    __A : int = 1
    __A : int = 2
    __A : int = 7
    __A : int = 10
    __A : float = 1e-8
    __A : float = 1e5

    def __UpperCamelCase ( self : Dict ) -> List[Any]:
        # Plain dataclass serialization.
        return asdict(self )


def lowercase_ ( ):
    # Default ESM-2 vocabulary (referenced above as `get_default_vocab_list`
    # — see NOTE at the top about the mangled name).
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
87
# Processor for LayoutLMv3: wraps an image processor (OCR + pixel values) and
# a tokenizer into a single callable.
#
# NOTE(review): this chunk looks mechanically obfuscated and does not run
# as-is: the class inherits from the undefined `__A` (presumably
# ProcessorMixin, imported above — confirm); the three class attributes are
# all named `__A` (later ones shadow earlier); most parameters are named
# `lowercase_` (duplicated parameter names are a SyntaxError) while the
# bodies read descriptive names (image_processor, tokenizer, text, boxes,
# ...) that are never bound; assignments target the throwaway `lowercase__`.
# Comments below describe the evident intent; no executable token is changed.
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case_ ( __A ):
    """Combines a LayoutLMv3 image processor and tokenizer into one processor."""

    # Originally: attributes = ["image_processor", "tokenizer"],
    # image_processor_class, tokenizer_class (shadowed here — see NOTE above).
    __A : Optional[Any] = ["image_processor", "tokenizer"]
    __A : Tuple = "LayoutLMv3ImageProcessor"
    __A : List[Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[Any] ) -> Optional[int]:
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, warning that it goes away in v5.
        lowercase__ : Union[str, Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , lowercase_ , )
            lowercase__ : Optional[int] = kwargs.pop("feature_extractor" )
        lowercase__ : int = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(lowercase_ , lowercase_ )

    def __call__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ) -> BatchEncoding:
        # verify input: OCR-enabled image processors produce their own boxes
        # and words, so caller-supplied ones are rejected.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        lowercase__ : Union[str, Any] = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(lowercase_ , lowercase_ ):
                # add batch dimension (as the image processor always adds a batch dimension)
                lowercase__ : Optional[Any] = [text]
            lowercase__ : Any = features["words"]
        lowercase__ : Tuple = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # add pixel values
        lowercase__ : Optional[int] = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            lowercase__ : Dict = self.get_overflowing_images(lowercase_ , encoded_inputs["overflow_to_sample_mapping"] )
        lowercase__ : str = images
        return encoded_inputs

    def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] ) -> Dict:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        lowercase__ : Tuple = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(lowercase_ ) != len(lowercase_ ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F''' {len(lowercase_ )} and {len(lowercase_ )}''' )
        return images_with_overflow

    def __UpperCamelCase ( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
        # Forwards to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : int ) -> Dict:
        # Forwards to the tokenizer's decode.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )

    @property
    def __UpperCamelCase ( self : Any ) -> Any:
        # Names of the model inputs this processor produces.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        # Deprecated alias for image_processor_class.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
        return self.image_processor_class

    @property
    def __UpperCamelCase ( self : List[Any] ) -> Tuple:
        # Deprecated alias for image_processor.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
        return self.image_processor
87
1
"""Random password utilities built on the cryptographic :mod:`secrets` module."""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Return a cryptographically random password of ``length`` characters."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Return an ``i``-character password guaranteed to contain ``chars_incl``.

    The remaining ``i - len(chars_incl)`` characters are drawn in roughly
    equal thirds from letters, digits and punctuation, then shuffled.
    """
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return ``i`` characters chosen uniformly at random from ``chars_incl``."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check that ``password`` is at least ``min_length`` characters and
    contains an uppercase letter, a lowercase letter, a digit and a special
    character."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


def main() -> None:
    """Interactive driver: prompt for a length and required characters."""
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
363
"""Optimal merge pattern: minimum total cost of pairwise-merging files."""
import heapq


def SCREAMING_SNAKE_CASE__(files: list) -> float:
    """Return the minimum total cost of merging all ``files`` into one.

    Merging two files of sizes a and b costs a + b; the optimal strategy
    always merges the two smallest files first.  Uses a min-heap
    (O(n log n)) instead of the original repeated ``min``/``index``/``pop``
    scan (O(n^2)), and no longer destructively mutates the caller's list.

    >>> SCREAMING_SNAKE_CASE__([2, 3, 4])
    14
    >>> SCREAMING_SNAKE_CASE__([5])
    0
    """
    heap = list(files)  # copy so the caller's list is left untouched
    heapq.heapify(heap)
    optimal_merge_cost = 0
    while len(heap) > 1:
        # Merge the two smallest remaining files.
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        optimal_merge_cost += merged
        heapq.heappush(heap, merged)
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
31
0
"""Lazy import structure for the MobileNetV2 model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Submodule -> exported public names; consumed by _LazyModule below.
# Fix: this dict (and the per-backend additions) must be bound to
# `_import_structure`, which the _LazyModule call at the bottom reads — the
# original bound them to throwaway names, raising NameError at import time.
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    # NOTE(review): these relative modules/names are spelled `..._mobilenet_va`
    # / `MobileNetVa*` while the lazy map above uses the `V2` spellings —
    # confirm against the real sibling file names.
    from .configuration_mobilenet_va import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetVaConfig,
        MobileNetVaOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
        from .image_processing_mobilenet_va import MobileNetVaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_va import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetVaForImageClassification,
            MobileNetVaForSemanticSegmentation,
            MobileNetVaModel,
            MobileNetVaPreTrainedModel,
            load_tf_weights_in_mobilenet_va,
        )
else:
    import sys

    # Fix: the lazy module must replace this module in sys.modules; the
    # original assigned it to a throwaway name, leaving the module inert.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
257
"""Project Euler problem 20: sum of the digits of n! (default n = 100)."""
from math import factorial


def __lowercase(a__: int = 1_00) -> int:
    """Return the sum of the decimal digits of ``a__`` factorial.

    Bug fixed: the original summed the *input* once per digit
    (``int(a__) for x in ...``) instead of each digit ``int(x)``.
    """
    return sum(int(x) for x in str(factorial(a__)))


# Public alias used by the CLI below; the original referenced `solution`
# without ever defining it, raising NameError when run as a script.
solution = __lowercase

if __name__ == "__main__":
    print(solution(int(input('''Enter the Number: ''').strip())))
257
1
"""Convert a T5X SwitchTransformers checkpoint into the Hugging Face format."""
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict

# Fix: the original said `from tax import checkpoints` — no such package; the
# checkpoint loader used below comes from the T5X library.
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging

logging.set_verbosity_info()

# should not include what is already done by the `from_pt` argument
# T5X parameter-name fragments -> HF SwitchTransformers names.
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    """Rename a flattened T5X state dict in place to HF naming; returns it."""
    # 1. in HF T5, block.{x}.layer.{y} corresponds to layers_{x} in T5X.
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encdec_pattern = r"(encoder|decoder)\/"
        if re.match(encdec_pattern, key):
            groups = re.match(encdec_pattern, new_key).groups()
            # mlp sits at sub-layer index 1 in the encoder and 2 in the decoder
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)

    # The relative-attention bias is transposed for the HF layout.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert tensor
    # into one state-dict entry per expert.  (The replacement key had been
    # destroyed by obfuscation — the literal "nested fstring" — and is
    # restored to the per-expert prefix.)
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"expert_{idx}/")] = expert_weights[idx]
                print(f"""{key} -> {key.replace("expert/", f"expert_{idx}/")}""")
            s_dict.pop(key)
    return s_dict


# gin-file hyperparameter names -> SwitchTransformersConfig kwargs.
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    """Parse `param = value` pairs from a gin file into a SwitchTransformersConfig."""
    # NOTE(review): this shadows the stdlib `re` imported above with the
    # third-party `regex` module — confirm that is intentional.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # ints stay ints; anything with a dot becomes a float
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    # The activation is declared as a one-element tuple of quoted strings.
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X SwitchTransformers checkpoint and save it as a PyTorch model."""
    print(f"""Loading flax weights from : {flax_checkpoint_path}""")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    # Fix: argparse stores `--switch_t5x_checkpoint_path` as
    # `switch_t5x_checkpoint_path`; the original read the nonexistent
    # `args.switch_tax_checkpoint_path` and crashed with AttributeError.
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
91
"""Lazy import structure for the X-CLIP model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule -> exported public names; consumed by _LazyModule below.
# Fix: this dict (and the torch-only addition below) must be bound to
# `_import_structure`, which the _LazyModule call reads — the original bound
# them to throwaway names, raising NameError at import time.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    # Fix: the lazy module must replace this module in sys.modules; the
    # original assigned it to a throwaway name, leaving the module inert.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 lowercase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 lowercase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class snake_case : '''simple docstring''' def __init__( self : Union[str, Any] ): '''simple docstring''' __A = WATERMARK_BITS __A = WatermarkEncoder() self.encoder.set_watermark('''bits''', self.watermark ) def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : torch.FloatTensor ): '''simple docstring''' # can't encode images that are smaller than 256 if images.shape[-1] < 2_56: return images __A = (2_55 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1 ).float().numpy() __A = [self.encoder.encode(_lowerCamelCase, '''dwtDct''' ) for image in images] __A = torch.from_numpy(np.array(_lowerCamelCase ) ).permute(0, 3, 1, 2 ) __A = torch.clamp(2 * (images / 2_55 - 0.5), min=-1.0, max=1.0 ) return images
266
"""Tokenization class for Blenderbot (byte-level BPE, GPT-2 style)."""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

# NOTE(review): the original chunk bound all three of these to one shadowed
# name while the class below references the canonical identifiers — restored.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values (0..255) to printable unicode strings.

    The BPE vocabulary is keyed on these strings; mapping every byte to a
    visible character avoids whitespace/control characters inside BPE symbols.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    # Remaining bytes get mapped to code points above 255, in order.
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class snake_case(PreTrainedTokenizer):
    """Blenderbot tokenizer: RoBERTa-style byte-level BPE with Blenderbot vocab files."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        """Number of tokens in the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocab (base + added tokens) as a token->id dict."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply byte-pair merges to `token`; returns space-joined BPE symbols (memoized)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (earliest-learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text: str) -> List[str]:
        """Split `text` with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Token string -> vocab id (falls back to the unk token's id)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Vocab id -> token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens) -> str:
        """Join BPE symbols and decode the byte-level encoding back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must be written in rank order for round-tripping.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 for special tokens, 0 for sequence tokens, matching build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """All-zero token-type ids (Blenderbot, like RoBERTa, does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word BPE-encodes like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Blenderbot inputs are simply the sequence followed by </s>."""
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into input ids, truncating from the left if too long."""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
266
1
from __future__ import annotations import pandas as pd def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]: a__: int = [0] * no_of_processes a__: Any = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(_SCREAMING_SNAKE_CASE ): a__: Any = burst_time[i] a__: Union[str, Any] = 0 a__: List[Any] = 0 a__: List[Any] = 999999999 a__: Optional[int] = 0 a__: Optional[int] = False # Process until all processes are completed while complete != no_of_processes: for j in range(_SCREAMING_SNAKE_CASE ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: a__: Optional[int] = remaining_time[j] a__: Union[str, Any] = j a__: int = True if not check: increment_time += 1 continue remaining_time[short] -= 1 a__: int = remaining_time[short] if minm == 0: a__: str = 999999999 if remaining_time[short] == 0: complete += 1 a__: Optional[Any] = False # Find finish time of current process a__: List[Any] = increment_time + 1 # Calculate waiting time a__: Tuple = finish_time - arrival_time[short] a__: Dict = finar - burst_time[short] if waiting_time[short] < 0: a__: List[str] = 0 # Increment time increment_time += 1 return waiting_time def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]: a__: Union[str, Any] = [0] * no_of_processes for i in range(_SCREAMING_SNAKE_CASE ): a__: Any = burst_time[i] + waiting_time[i] return turn_around_time def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->None: a__: Tuple = 0 a__: int = 0 for i in range(_SCREAMING_SNAKE_CASE ): a__: Optional[Any] = total_waiting_time + waiting_time[i] a__: int = total_turn_around_time + turn_around_time[i] print(F'Average waiting time = {total_waiting_time / no_of_processes:.5f}' ) print('Average turn around time =' , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print('Enter how many process you want to analyze') 
lowercase__ = int(input()) lowercase__ = [0] * no_of_processes lowercase__ = [0] * no_of_processes lowercase__ = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print('Enter the arrival time and burst time for process:--' + str(i + 1)) lowercase__ , lowercase__ = map(int, input().split()) lowercase__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowercase__ = burst_time lowercase__ = no_of_processes lowercase__ = waiting_time lowercase__ = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) lowercase__ = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ 'Process', 'BurstTime', 'ArrivalTime', 'WaitingTime', 'TurnAroundTime', ], ) # Printing the dataFrame pd.set_option('display.max_rows', fcfs.shape[0] + 1) print(fcfs)
350
"""Object-detection pipeline: image in, list of {score, label, box} predictions out."""
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import (
        MODEL_FOR_OBJECT_DETECTION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
    )

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __snake_case(Pipeline):
    """Pipeline predicting bounding boxes and class labels for objects in an image.

    Supports both regular object-detection models and LayoutLM-style token
    classifiers (when a tokenizer is present, words located by OCR are classified).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        """Route the optional `threshold` kwarg to postprocess; nothing else is configurable."""
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        """Run detection on one image (or a batch); see the base Pipeline docs."""
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        """Load the image, run the image processor (and tokenizer for LayoutLM variants)."""
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        """Run the model; carry target_size (and OCR boxes) through for postprocessing."""
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        """Turn raw model outputs into [{"score", "label", "box"}] above `threshold`."""
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # LayoutLM boxes are on a 0..1000 grid; rescale to pixel coordinates.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(scores.tolist(), labels, boxes)
                if vals[0] > threshold
            ]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(
                model_outputs, threshold, target_size
            )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Convert an (xmin, ymin, xmax, ymax) tensor into a dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
203
0
"""Shared test helpers for the `datasets` test suite: env flags, skip decorators,
offline-network simulation, Arrow-memory assertions, and async subprocess runners."""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean env var `key` ("yes"/"no" style); fall back to `default` if unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip `test_case` unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip `test_case` unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip `test_case` unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip `test_case` unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip `test_case` unless PyTorch is available."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip `test_case` unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip `test_case` unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip `test_case` unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip `test_case` unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip `test_case` unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip `test_case` unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip unless spacy and the given spacy `model` can be loaded."""
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip `test_case` unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip `test_case` unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip `test_case` unless RUN_SLOW is enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip `test_case` unless RUN_LOCAL is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip `test_case` unless RUN_PACKAGED is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip `test_case` unless RUN_REMOTE is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Class decorator applying every decorator in `decorators` to each test_* method."""
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    """Raised in offline mode when a request is attempted without a timeout."""

    pass


class OfflineSimulationMode(Enum):
    """How `offline()` simulates the absence of a network connection."""

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the context.

    CONNECTION_FAILS raises ConnectionError immediately; CONNECTION_TIMES_OUT
    redirects requests to a non-routable address with a tiny timeout;
    HF_DATASETS_OFFLINE_SET_TO_1 patches the library's offline config flag.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """chdir into a fresh temporary directory for the context; always chdir back."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that Arrow-allocated memory strictly grows inside the context."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that Arrow-allocated memory does not grow inside the context."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert (
        pa.total_allocated_bytes() - previous_allocated_memory <= 0
    ), "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    """Whether two numpy bit generators produce identical streams (checked on copies)."""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    """Turn transient 500/502 HTTP errors raised by `func` into pytest xfails."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    """Result of an async subprocess run: return code plus captured stdout/stderr lines."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """Feed each line of an async stream to `callback` until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run `cmd` asynchronously, teeing stdout/stderr to the console while capturing them."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Synchronously run `cmd` via the async streamer; raise on failure or empty output."""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Numeric id of the current pytest-xdist worker ("gw3" -> 3; 0 when not distributed)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """A per-xdist-worker unique port for torch.distributed, offset from 29500."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
53
"""Pearson correlation coefficient metric for the `datasets` library."""
from scipy.stats import pearsonr

import datasets


# Description / usage / citation strings referenced by the metric decorator below.
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'

_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n        Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n        Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n        Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n        Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n        Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n        Kern, Robert and Larson, Eric and Carey, C J and\n        Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n        {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n        Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n        Harris, Charles R. and Archibald, Anne M. and\n        Ribeiro, Antonio H. and Pedregosa, Fabian and\n        {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n        Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __A(datasets.Metric):
    """Wraps scipy.stats.pearsonr as a `datasets` metric."""

    def _info(self):
        """Declare the metric's features and documentation for the datasets framework."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Compute the Pearson r of predictions vs references; optionally include the p-value."""
        if return_pvalue:
            # pearsonr is symmetric in its two arguments, so argument order is immaterial.
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
44
0
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BlenderbotSmall tokenizer, built on byte-level BPE.

    Mirrors the slow ``BlenderbotSmallTokenizer`` interface; special-token
    handling adds BOS/EOS around the first sequence and EOS separators for a
    second sequence.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one (or two) token-id sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        # Pair of sequences: <bos> A <eos> <eos> B <eos>
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zero token-type mask (Blenderbot does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
355
import argparse
import struct
import unittest


class SHAaaa:
    """Pure-Python SHA-256 (FIPS 180-4).

    The final hex digest of ``data`` is available as the ``hash`` attribute
    after construction.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values (first 32 bits of the fractional parts of the
        # square roots of the first 8 primes).
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Initialize round constants (first 32 bits of the fractional parts of
        # the cube roots of the first 64 primes).
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a multiple of 64 bytes: 0x80, zeros, then the 8-byte
        big-endian bit length of the original message."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the SHA-256 compression over all 64-byte blocks; sets self.hash."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers, then
            # extend the 16-word schedule to 64 words (filled in below).
            words = list(struct.unpack(">16L", block)) + [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # Message-schedule expansion (sigma0 / sigma1).
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                big_s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + big_s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                big_s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (big_s0 + maj) % 0x100000000
                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x100000000,
                    e,
                    f,
                    g,
                )
            # Modify final values
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            self.hashes = [
                (element + self.hashes[index]) % 0x100000000
                for index, element in enumerate(mutated_hash_values)
            ]
        self.hash = "".join(hex(value)[2:].zfill(8) for value in self.hashes)

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by `rotations` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHAaaaTest(unittest.TestCase):
    """Cross-check against hashlib's reference SHA-256 implementation."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """CLI entry point: hash a string (default) or a file's contents."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
198
0
"""simple docstring""" import numpy as np import qiskit def _snake_case ( lowercase__ = 8 , lowercase__ = None ): _lowerCamelCase : str = np.random.default_rng(seed=lowercase__ ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. _lowerCamelCase : List[str] = 6 * key_len # Measurement basis for Alice's qubits. _lowerCamelCase : int = rng.integers(2 , size=lowercase__ ) # The set of states Alice will prepare. _lowerCamelCase : str = rng.integers(2 , size=lowercase__ ) # Measurement basis for Bob's qubits. _lowerCamelCase : str = rng.integers(2 , size=lowercase__ ) # Quantum Circuit to simulate BB84 _lowerCamelCase : Dict = qiskit.QuantumCircuit(lowercase__ , name='BB84' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(lowercase__ ): if alice_state[index] == 1: bbaa_circ.x(lowercase__ ) if alice_basis[index] == 1: bbaa_circ.h(lowercase__ ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(lowercase__ ): if bob_basis[index] == 1: bbaa_circ.h(lowercase__ ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. _lowerCamelCase : List[str] = qiskit.Aer.get_backend('aer_simulator' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. _lowerCamelCase : List[Any] = qiskit.execute(lowercase__ , lowercase__ , shots=1 , seed_simulator=lowercase__ ) # Returns the result of measurement. _lowerCamelCase : Optional[Any] = job.result().get_counts(lowercase__ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. _lowerCamelCase : Optional[int] = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( lowercase__ , lowercase__ , lowercase__ ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. 
_lowerCamelCase : Union[str, Any] = gen_key[:key_len] if len(lowercase__ ) >= key_len else gen_key.ljust(lowercase__ , '0' ) return key if __name__ == "__main__": print(F"The generated key is : {bbaa(8, seed=0)}") from doctest import testmod testmod()
96
# Lazy-import wiring for the XGLM model family: framework-specific modules are
# only imported when their symbols are first accessed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public symbols it provides (extended conditionally below).
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
8
0
"""simple docstring""" import math class UpperCamelCase : def _lowercase ( self : Tuple , UpperCAmelCase__ : list[list[float]] , UpperCAmelCase__ : list[int] ) -> int: _a : str = 0.0 _a : Optional[int] = 0.0 for i in range(len(UpperCAmelCase__ ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def _lowercase ( self : Optional[int] , UpperCAmelCase__ : list[list[int | float]] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : float ) -> list[list[int | float]]: for i in range(len(UpperCAmelCase__ ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def lowerCAmelCase__ ( ): '''simple docstring''' # Training Examples ( m, n ) _a : Tuple = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _a : Tuple = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _a : Union[str, Any] = SelfOrganizingMap() _a : Optional[Any] = 3 _a : List[Any] = 0.5 for _ in range(UpperCamelCase__ ): for j in range(len(UpperCamelCase__ ) ): # training sample _a : List[str] = training_samples[j] # Compute the winning vector _a : Tuple = self_organizing_map.get_winner(UpperCamelCase__ , UpperCamelCase__ ) # Update the winning vector _a : Any = self_organizing_map.update(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # classify test sample _a : str = [0, 0, 0, 1] _a : str = self_organizing_map.get_winner(UpperCamelCase__ , UpperCamelCase__ ) # results print(F"""Clusters that the test sample belongs to : {winner}""" ) print(F"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
324
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Dict = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) _a : Dict = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 4_2 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ), ] , ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Optional[int] = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) _a : Any = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , """dataset_info.json""" ) ) def lowerCAmelCase__ ( ): '''simple docstring''' _a : Dict = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": 
Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , ) _a : int = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _a : List[str] = yaml.safe_dump(UpperCamelCase__ ) _a : Optional[int] = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def lowerCAmelCase__ ( ): '''simple docstring''' _a : List[Any] = DatasetInfo() _a : Any = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=4_2 ), """v2""": DatasetInfo(dataset_size=1_3_3_7 ), } ), ] , ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : List[Any] = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) _a : List[Any] = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _a : str = config_name # the yaml representation doesn't include fields like description or 
citation # so we just test that we can recover what we can from the yaml _a : Dict = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , """README.md""" ) )
324
1
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


# Datasets (and configs) mirrored on the Hugging Face GCP bucket.
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build absl ``named_parameters`` entries for the mirrored datasets.

    With ``with_config=True`` one entry per (dataset, config) pair; otherwise
    one entry per distinct dataset name.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset}
            for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    # Filled per test case by the absl parameterization above.
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        """The dataset_info file for each mirrored config must be downloadable."""
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    """Prepare wikipedia/20220301.frr from the HF GCS mirror and load it."""
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    """Streaming mode must yield an IterableDatasetDict with a train split."""
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    """Per-process tensor [p*n + 1, ..., p*n + n] placed on the process's device."""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    """gather() must concatenate every process's tensor in rank order."""
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    """gather_object() must collect one python object per process, in rank order."""
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    """broadcast() must copy the main process's tensor to every rank."""
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    """pad_across_processes() must zero-pad shorter tensors to the max length."""
    # Main process holds one extra element so padding actually does something.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    """reduce(..., "sum") must elementwise-sum tensors across 2 processes."""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    """reduce(..., "mean") must elementwise-average tensors across 2 processes."""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # Entry point for spawned workers (torch.multiprocessing / TPU).
    main()


def main():
    """Run every collective-operation check under the current PartialState."""
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
1
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
361
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of 1..n, in lexicographic order.

    >>> generate_all_combinations(4, 2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Depth-first helper: extend `current_list` with values from `increment`
    upward, appending a copy to `total_list` once `level` values are chosen."""
    if level == 0:
        total_list.append(current_list[:])
        return

    # Upper bound leaves room for the remaining (level - 1) picks.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print each combination on its own line."""
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
307
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for GPT-NeoX-Japanese models.

    Stores the hyper-parameters that define the model architecture; defaults
    match the abeja/gpt-neox-japanese-2.7b checkpoint.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Intermediate (MLP) size is expressed as a multiple of hidden_size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        # Fraction of head dimensions that receive rotary position embeddings.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
178
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    """Slow integration test for the DiT document-image classifier."""

    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
178
1
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n (trial division)."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoised count of distinct prime factors of num."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True when every element of `iterable` is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers that each have exactly n
    distinct prime factors, returned as a list."""
    base = 2

    while True:
        # Candidate window of n consecutive integers starting at `base`.
        group = [base + i for i in range(n)]

        # Distinct-prime-factor count of each candidate, plus the target n:
        # if all entries are equal, every candidate has exactly n factors.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Project Euler 47: smallest first member of the earliest run of n
    consecutive integers with n distinct prime factors each.

    >>> solution(3)
    644
    """
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
357
"""simple docstring""" def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> list: '''simple docstring''' if any(not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or x < 0 for x in sequence ): raise TypeError("""Sequence must be list of non-negative integers""" ) for _ in range(len(__lowerCAmelCase ) ): for i, (rod_upper, rod_lower) in enumerate(zip(__lowerCAmelCase , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
313
0
"""Lazy-import wiring for the TimeSformer video model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public symbols it provides (extended conditionally below).
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
"""Count binary trees and binary search trees on a given number of nodes."""


def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k) via the multiplicative formula (exact integer math)."""
    result = 1  # To kept the Calculated Value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the node_count-th Catalan number: C(2n, n) // (n + 1).

    This is the number of binary search trees on node_count nodes.
    """
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n! for n >= 0.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of labelled binary trees on node_count nodes: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
31
0
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _A = 6_378_137.0 _A = 6_356_752.314_245 _A = 6_37_81_37 def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> float: UpperCAmelCase__ : Union[str, Any] = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude UpperCAmelCase__ : List[str] = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) ) UpperCAmelCase__ : str = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius UpperCAmelCase__ : Any = haversine_distance(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values UpperCAmelCase__ : int = (b_lata + b_lata) / 2 UpperCAmelCase__ : Any = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) UpperCAmelCase__ : Optional[Any] = (sin(lowerCAmelCase ) ** 2) * (cos(lowerCAmelCase ) ** 2) UpperCAmelCase__ : Optional[Any] = cos(sigma / 2 ) ** 2 UpperCAmelCase__ : Optional[int] = (sigma - sin(lowerCAmelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) UpperCAmelCase__ : Optional[int] = (cos(lowerCAmelCase ) ** 2) * (sin(lowerCAmelCase ) ** 2) UpperCAmelCase__ : str = sin(sigma / 2 ) ** 2 UpperCAmelCase__ : Union[str, Any] = (sigma + sin(lowerCAmelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
166
"""simple docstring""" _A = range(2, 20 + 1) _A = [10**k for k in range(ks[-1] + 1)] _A = {} def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int: UpperCAmelCase__ : List[str] = sum(a_i[j] for j in range(lowerCAmelCase , len(lowerCAmelCase ) ) ) UpperCAmelCase__ : str = sum(a_i[j] * base[j] for j in range(min(len(lowerCAmelCase ) , lowerCAmelCase ) ) ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = 0, 0 UpperCAmelCase__ : Optional[Any] = n - i UpperCAmelCase__ : Union[str, Any] = memo.get(lowerCAmelCase ) if sub_memo is not None: UpperCAmelCase__ : Any = sub_memo.get(lowerCAmelCase ) if jumps is not None and len(lowerCAmelCase ) > 0: # find and make the largest jump without going over UpperCAmelCase__ : Optional[int] = -1 for _k in range(len(lowerCAmelCase ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: UpperCAmelCase__ : str = _k break if max_jump >= 0: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = jumps[max_jump] # since the difference between jumps is cached, add c UpperCAmelCase__ : Any = diff + c for j in range(min(lowerCAmelCase , len(lowerCAmelCase ) ) ): UpperCAmelCase__ , UpperCAmelCase__ : Tuple = divmod(lowerCAmelCase , 10 ) if new_c > 0: add(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else: UpperCAmelCase__ : int = [] else: UpperCAmelCase__ : Union[str, Any] = {c: []} UpperCAmelCase__ : Union[str, Any] = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = next_term(lowerCAmelCase , k - 1 , i + dn , lowerCAmelCase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead UpperCAmelCase__ , UpperCAmelCase__ : Tuple = compute(lowerCAmelCase , lowerCAmelCase , i + dn , lowerCAmelCase ) diff += _diff dn += terms_jumped UpperCAmelCase__ : str = sub_memo[c] 
# keep jumps sorted by # of terms skipped UpperCAmelCase__ : Any = 0 while j < len(lowerCAmelCase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(lowerCAmelCase , (diff, dn, k) ) return (diff, dn) def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]: if i >= n: return 0, i if k > len(lowerCAmelCase ): a_i.extend([0 for _ in range(k - len(lowerCAmelCase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) UpperCAmelCase__ : Tuple = i UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = 0, 0, 0 for j in range(len(lowerCAmelCase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 UpperCAmelCase__ : Dict = ds_c + ds_b diff += addend UpperCAmelCase__ : Tuple = 0 for j in range(lowerCAmelCase ): UpperCAmelCase__ : Tuple = a_i[j] + addend UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = divmod(lowerCAmelCase , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) return diff, i - start_i def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]: for j in range(lowerCAmelCase , len(lowerCAmelCase ) ): UpperCAmelCase__ : Optional[Any] = digits[j] + addend if s >= 10: UpperCAmelCase__ , UpperCAmelCase__ : Dict = divmod(lowerCAmelCase , 10 ) UpperCAmelCase__ : Any = addend // 10 + quotient else: UpperCAmelCase__ : Optional[Any] = s UpperCAmelCase__ : Tuple = addend // 10 if addend == 0: break while addend > 0: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = divmod(lowerCAmelCase , 10 ) digits.append(lowerCAmelCase ) def a__ ( lowerCAmelCase = 10**15 ) -> int: UpperCAmelCase__ : Optional[int] = [1] UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : Dict = 0 while True: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = next_term(lowerCAmelCase , 20 , i + dn , lowerCAmelCase ) dn += terms_jumped if dn == n - i: break 
UpperCAmelCase__ : Optional[int] = 0 for j in range(len(lowerCAmelCase ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(f'''{solution() = }''')
166
1
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class lowerCAmelCase__ : '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = None # Automatically constructed __UpperCamelCase = "dict" __UpperCamelCase = None __UpperCamelCase = field(default="Translation" , init=UpperCAmelCase__ , repr=UpperCAmelCase__ ) def __call__( self : List[str]): '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages)}) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' from .features import Value return {k: Value('''string''') for k in sorted(self.languages)} @dataclass class lowerCAmelCase__ : '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None # Automatically constructed __UpperCamelCase = "dict" __UpperCamelCase = None __UpperCamelCase = field(default="TranslationVariableLanguages" , init=UpperCAmelCase__ , repr=UpperCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = sorted(set(self.languages)) if self.languages else None SCREAMING_SNAKE_CASE_ : Any = len(self.languages) if self.languages else None def __call__( self : Optional[int]): '''simple docstring''' return pa.struct({'''language''': pa.list_(pa.string()), '''translation''': pa.list_(pa.string())}) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = set(self.languages) if self.languages and set(lowercase_) - lang_set: raise ValueError( F'Some languages in example ({", ".join(sorted(set(lowercase_) - lang_set))}) are not in valid set ({", ".join(lowercase_)}).') # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. 
SCREAMING_SNAKE_CASE_ : int = [] for lang, text in translation_dict.items(): if isinstance(lowercase_ , lowercase_): translation_tuples.append((lang, text)) else: translation_tuples.extend([(lang, el) for el in text]) # Ensure translations are in ascending order by language code. SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = zip(*sorted(lowercase_)) return {"language": languages, "translation": translations} def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value('''string''')), "translation": Sequence(Value('''string''')), }
91
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = (PNDMScheduler,) __UpperCamelCase = (("num_inference_steps", 5_0),) def _SCREAMING_SNAKE_CASE ( self : Any , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = { '''num_train_timesteps''': 1000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowercase_) return config def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str]=0 , **lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class.from_pretrained(lowercase_) new_scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Optional[Any] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, 
"Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]=0 , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals (must be after setting timesteps) SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(lowercase_) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_) # copy over dummy past residual (must be after setting timesteps) SCREAMING_SNAKE_CASE_ : Any = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : 
List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Tuple = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : str , **lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Dict = 10 SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_) for i, t in enumerate(scheduler.prk_timesteps): SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample for i, t in enumerate(scheduler.plms_timesteps): SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_).prev_sample return sample def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''num_inference_steps''' , lowercase_) for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps'''): scheduler.set_timesteps(lowercase_) elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps'''): SCREAMING_SNAKE_CASE_ : Optional[Any] = num_inference_steps # copy 
over dummy past residuals (must be done after set_timesteps) SCREAMING_SNAKE_CASE_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] SCREAMING_SNAKE_CASE_ : Optional[int] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Any = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config(steps_offset=1) SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**lowercase_) scheduler.set_timesteps(10) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , ) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for prediction_type in 
["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]): self.check_over_forward(num_inference_steps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27 for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample SCREAMING_SNAKE_CASE_ : str = 0.1 * sample SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2]): SCREAMING_SNAKE_CASE_ : int = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' with self.assertRaises(lowercase_): SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**lowercase_) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.full_loop() SCREAMING_SNAKE_CASE_ : List[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_98.13_18) < 1e-2 assert abs(result_mean.item() - 0.25_80) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.full_loop(prediction_type='''v_prediction''') SCREAMING_SNAKE_CASE_ : str = 
torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 67.39_86) < 1e-2 assert abs(result_mean.item() - 0.08_78) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 2_30.03_99) < 1e-2 assert abs(result_mean.item() - 0.29_95) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_86.94_82) < 1e-2 assert abs(result_mean.item() - 0.24_34) < 1e-3
91
1
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case : def __init__( self : Optional[Any], __lowercase : Optional[Any], __lowercase : Any=13, __lowercase : Optional[Any]=3, __lowercase : str=True, __lowercase : Union[str, Any]=True, __lowercase : int=0.1, __lowercase : Any=0.1, __lowercase : Optional[int]=224, __lowercase : Tuple=1000, __lowercase : Dict=[3, 3, 6, 4], __lowercase : str=[48, 56, 112, 220], ): lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = num_labels lowercase__ = image_size lowercase__ = layer_depths lowercase__ = embed_dims def A__ ( self : Tuple ): lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size], self.num_labels ) lowercase__ = self.get_config() return config, pixel_values, labels def A__ ( self : List[Any] ): return SwiftFormerConfig( depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, 
down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=__lowercase, layer_scale_init_value=1e-5, ) def A__ ( self : str, __lowercase : Optional[Any], __lowercase : Tuple, __lowercase : Union[str, Any] ): lowercase__ = SwiftFormerModel(config=__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7) ) def A__ ( self : Optional[int], __lowercase : Optional[Any], __lowercase : Dict, __lowercase : Dict ): lowercase__ = self.num_labels lowercase__ = SwiftFormerForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = model(__lowercase, labels=__lowercase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) lowercase__ = SwiftFormerForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = model(__lowercase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def A__ ( self : Optional[Any] ): ((lowercase__) , (lowercase__) , (lowercase__)) = self.prepare_config_and_inputs() lowercase__ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase): UpperCamelCase__ : int =(SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () UpperCamelCase__ : Optional[int] =( {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ : Dict =False UpperCamelCase__ : List[Any] =False UpperCamelCase__ : Optional[Any] =False UpperCamelCase__ : int =False UpperCamelCase__ : Union[str, Any] =False def A__ ( self : Dict ): lowercase__ = SwiftFormerModelTester(self ) lowercase__ = ConfigTester( self, 
config_class=__lowercase, has_text_modality=__lowercase, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, ) def A__ ( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def A__ ( self : Dict ): pass def A__ ( self : Optional[Any] ): lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(__lowercase ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowercase, nn.Linear ) ) def A__ ( self : List[Any] ): lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(__lowercase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["pixel_values"] self.assertListEqual(arg_names[:1], __lowercase ) def A__ ( self : Tuple ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def A__ ( self : List[str] ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) @slow def A__ ( self : str ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = SwiftFormerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def A__ ( self : List[str] ): pass def A__ ( self : Optional[Any] ): def check_hidden_states_output(__lowercase : Any, __lowercase : Optional[int], __lowercase : Optional[int] ): lowercase__ = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) ) lowercase__ = outputs.hidden_states 
lowercase__ = 8 self.assertEqual(len(__lowercase ), __lowercase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__lowercase ) ): self.assertEqual( hidden_states[i].shape, torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ), ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = True check_hidden_states_output(__lowercase, __lowercase, __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ = True check_hidden_states_output(__lowercase, __lowercase, __lowercase ) def A__ ( self : Any ): def _config_zero_init(__lowercase : List[str] ): lowercase__ = copy.deepcopy(__lowercase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__lowercase, __lowercase, 1e-1_0 ) if isinstance(getattr(__lowercase, __lowercase, __lowercase ), __lowercase ): lowercase__ = _config_zero_init(getattr(__lowercase, __lowercase ) ) setattr(__lowercase, __lowercase, __lowercase ) return configs_no_init lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = _config_zero_init(__lowercase ) for model_class in self.all_model_classes: lowercase__ = model_class(config=__lowercase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def A__ ( self : Optional[Any] ): pass def __lowerCAmelCase ( ): lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase): @cached_property def A__ ( self : List[str] ): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def A__ ( self : Dict ): lowercase__ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowercase ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=__lowercase, return_tensors="pt" ).to(__lowercase ) # forward pass with torch.no_grad(): lowercase__ = model(**__lowercase ) # verify the logits lowercase__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape, __lowercase ) lowercase__ = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __lowercase, atol=1e-4 ) )
355
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = int(SCREAMING_SNAKE_CASE_ ) if decimal in (0, 1): # Exit cases for the recursion return str(SCREAMING_SNAKE_CASE_ ) lowercase__ , lowercase__ = divmod(SCREAMING_SNAKE_CASE_ , 2 ) return binary_recursive(SCREAMING_SNAKE_CASE_ ) + str(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(SCREAMING_SNAKE_CASE_ ).strip() if not number: raise ValueError("No input value was provided" ) lowercase__ = "-" if number.startswith("-" ) else "" lowercase__ = number.lstrip("-" ) if not number.isnumeric(): raise ValueError("Input value is not an integer" ) return f'''{negative}0b{binary_recursive(int(SCREAMING_SNAKE_CASE_ ) )}''' if __name__ == "__main__": from doctest import testmod testmod()
224
0
from __future__ import annotations

import copy
import tempfile
import unittest

from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tensorflow_probability,
    require_tf,
    slow,
)

from ..bert.test_modeling_bert import BertModelTester


if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPTaLMHeadModel,
        TFRobertaForMaskedLM,
        TFTaForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST


# NOTE(review): identifiers throughout this module look machine-mangled
# (`snake_case__` is reused for three different classes, every local is
# `lowerCAmelCase_`, and `UpperCamelCase__` is referenced but never bound).
# The code is preserved verbatim; the unresolved names presumably were
# `model_name` / the expected model or config classes upstream — confirm
# against the original transformers test before relying on behavior.
class snake_case__(snake_case_):
    """Dummy config registered under the model type '''new-model'''."""

    SCREAMING_SNAKE_CASE__ : Tuple = '''new-model'''


if is_tf_available():

    class snake_case__(snake_case_):
        """Dummy TF model wired to the dummy config above."""

        SCREAMING_SNAKE_CASE__ : Optional[int] = NewModelConfig


@require_tf
class snake_case__(unittest.TestCase):
    """Integration tests for the TFAutoModel* factory classes."""

    @slow
    def lowercase_(self) -> Union[str, Any]:
        """Load a base model from a BERT checkpoint via TFAutoModel."""
        lowerCAmelCase_ : Optional[int] = "bert-base-cased"
        lowerCAmelCase_ : Dict = AutoConfig.from_pretrained(UpperCamelCase__)
        self.assertIsNotNone(UpperCamelCase__)
        self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
        lowerCAmelCase_ : Tuple = TFAutoModel.from_pretrained(UpperCamelCase__)
        self.assertIsNotNone(UpperCamelCase__)
        self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    @slow
    def lowercase_(self) -> str:
        """Load a pre-training head model via TFAutoModelForPreTraining."""
        lowerCAmelCase_ : List[Any] = "bert-base-cased"
        lowerCAmelCase_ : int = AutoConfig.from_pretrained(UpperCamelCase__)
        self.assertIsNotNone(UpperCamelCase__)
        self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
        lowerCAmelCase_ : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(UpperCamelCase__)
        self.assertIsNotNone(UpperCamelCase__)
        self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    @slow
    def lowercase_(self) -> Tuple:
        """Causal-LM loading from the first GPT-2 archive checkpoint."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
            lowerCAmelCase_ : List[Any] = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase__)
            # Second load requests loading info as well.
            lowerCAmelCase_ : Dict = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase__, output_loading_info=UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    @slow
    def lowercase_(self) -> Any:
        """LM-head loading via the deprecated TFAutoModelWithLMHead."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : Tuple = AutoConfig.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
            lowerCAmelCase_ : Any = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    @slow
    def lowercase_(self) -> Any:
        """Masked-LM loading from the first BERT archive checkpoint."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
            lowerCAmelCase_ : Dict = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase__)
            lowerCAmelCase_ : str = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase__, output_loading_info=UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    @slow
    def lowercase_(self) -> List[Any]:
        """Seq2seq-LM loading from the first T5 archive checkpoint."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
            lowerCAmelCase_ : str = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__)
            lowerCAmelCase_ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__, output_loading_info=UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    @slow
    def lowercase_(self) -> List[Any]:
        """Sequence-classification loading from bert-base-uncased."""
        for model_name in ["bert-base-uncased"]:
            lowerCAmelCase_ : str = AutoConfig.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
            lowerCAmelCase_ : Any = TFAutoModelForSequenceClassification.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    @slow
    def lowercase_(self) -> int:
        """Question-answering loading from bert-base-uncased."""
        for model_name in ["bert-base-uncased"]:
            lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
            lowerCAmelCase_ : str = TFAutoModelForQuestionAnswering.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    @slow
    @require_tensorflow_probability
    def lowercase_(self) -> Optional[int]:
        """Table-QA loading (TAPAS) — needs tensorflow-probability."""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            lowerCAmelCase_ : Any = AutoConfig.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
            lowerCAmelCase_ : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCamelCase__)
            lowerCAmelCase_ : Any = TFAutoModelForTableQuestionAnswering.from_pretrained(
                UpperCamelCase__, output_loading_info=UpperCamelCase__
            )
            self.assertIsNotNone(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    def lowercase_(self) -> Union[str, Any]:
        """Parameter count is stable with and without only_trainable.

        NOTE(review): `model` below is unresolved (mangled) — presumably the
        object loaded two lines above; the expected count is 14410.
        """
        lowerCAmelCase_ : List[Any] = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase__)
        self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
        self.assertEqual(model.num_parameters(), 1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase__), 1_4_4_1_0)

    def lowercase_(self) -> int:
        """Same parameter-count check on a second tiny checkpoint."""
        lowerCAmelCase_ : List[str] = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase__)
        self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
        self.assertEqual(model.num_parameters(), 1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase__), 1_4_4_1_0)

    def lowercase_(self) -> Union[str, Any]:
        """Funnel: from_config honours the architectures field and survives save/reload."""
        lowerCAmelCase_ : Optional[int] = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''')
        self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
        lowerCAmelCase_ : Tuple = copy.deepcopy(model.config)
        lowerCAmelCase_ : str = ["FunnelBaseModel"]
        lowerCAmelCase_ : int = TFAutoModel.from_config(UpperCamelCase__)
        self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(UpperCamelCase__)
            lowerCAmelCase_ : Union[str, Any] = TFAutoModel.from_pretrained(UpperCamelCase__)
            self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)

    def lowercase_(self) -> Tuple:
        """Registering a new model type with every TFAuto* class, then cleaning up."""
        try:
            AutoConfig.register('''new-model''', UpperCamelCase__)
            lowerCAmelCase_ : int = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(UpperCamelCase__):
                        auto_class.register(UpperCamelCase__, UpperCamelCase__)
                    auto_class.register(UpperCamelCase__, UpperCamelCase__)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(UpperCamelCase__):
                        auto_class.register(UpperCamelCase__, UpperCamelCase__)
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    lowerCAmelCase_ : List[str] = BertModelTester(self).get_config()
                    lowerCAmelCase_ : List[Any] = NewModelConfig(**tiny_config.to_dict())
                    lowerCAmelCase_ : Optional[Any] = auto_class.from_config(UpperCamelCase__)
                    self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(UpperCamelCase__)
                        lowerCAmelCase_ : str = auto_class.from_pretrained(UpperCamelCase__)
                        self.assertIsInstance(UpperCamelCase__, UpperCamelCase__)
        finally:
            # Undo the registrations so other tests see a pristine mapping.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def lowercase_(self) -> str:
        """A bare model name that is neither local nor a repo id raises."""
        with self.assertRaisesRegex(
            UpperCamelCase__, '''bert-base is not a local folder and is not a valid model identifier'''
        ):
            lowerCAmelCase_ : Union[str, Any] = TFAutoModel.from_pretrained('''bert-base''')

    def lowercase_(self) -> int:
        """An invalid git revision raises a descriptive error."""
        with self.assertRaisesRegex(
            UpperCamelCase__, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''
        ):
            lowerCAmelCase_ : Union[str, Any] = TFAutoModel.from_pretrained(UpperCamelCase__, revision='''aaaaaa''')

    def lowercase_(self) -> Tuple:
        """A config-only repo without weights raises a missing-file error."""
        with self.assertRaisesRegex(
            UpperCamelCase__,
            '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''',
        ):
            lowerCAmelCase_ : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''')

    def lowercase_(self) -> Optional[Any]:
        """A PyTorch-only checkpoint tells the user to pass from_pt=True."""
        with self.assertRaisesRegex(UpperCamelCase__, '''Use `from_pt=True` to load this model'''):
            lowerCAmelCase_ : Optional[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''')

    def lowercase_(self) -> int:
        """A second load of a cached checkpoint only issues a HEAD request."""
        lowerCAmelCase_ : int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        with RequestCounter() as counter:
            lowerCAmelCase_ : Optional[int] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''')
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
        # With a sharded checkpoint
        lowerCAmelCase_ : Dict = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''')
        with RequestCounter() as counter:
            lowerCAmelCase_ : Dict = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''')
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
262
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _lowerCAmelCase ( unittest.TestCase ): __UpperCAmelCase : Union[str, Any] = JukeboxTokenizer __UpperCAmelCase : Union[str, Any] = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def lowerCamelCase ( self ) -> int: '''simple docstring''' import torch snake_case : Any = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" ) snake_case : Optional[Any] = tokenizer(**self.metas )["input_ids"] # fmt: off snake_case : Optional[int] = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 
45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 
27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def lowerCamelCase ( self ) -> Any: '''simple docstring''' import torch snake_case : Tuple = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" ) snake_case : Optional[Any] = tokenizer(**self.metas )["input_ids"] # fmt: off snake_case : List[Any] = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on 
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
203
0
"""Find articulation points (cut vertices) of an undirected graph via DFS low-link."""


def compute_ap(l):  # noqa: E741
    """Print and return the articulation points of graph ``l``.

    ``l`` is an adjacency list: dict (or list) mapping vertex index -> list of
    neighbours, with vertices numbered 0..n-1.  A vertex is an articulation
    point if removing it disconnects the graph.

    NOTE: the previous revision was machine-mangled (every local collapsed to
    ``_a``, parameters to ``lowercase``) and its module-level caller referenced
    ``compute_ap``, which did not exist — this restores a working definition
    under that name.  Returns the sorted list of articulation points (new,
    backward-compatible: the old version returned None).
    """
    n = len(l)
    low = [0] * n          # low-link value per vertex
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        """DFS from ``at``; returns the number of tree edges leaving ``root``."""
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                # Back edge: tighten the low-link with the ancestor's id.
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # The DFS root is an articulation point iff it has >1 tree children;
            # this overwrites any flag set for the root inside dfs().
            is_art[i] = out_edge_count > 1

    result = [x for x in range(len(is_art)) if is_art[x] is True]
    for x in result:
        print(x)
    return result


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
352
"""Speech2Text tokenizer: a SentencePiece-backed tokenizer with language-code
prefix tokens (vocab.json + sentencepiece.bpe.model)."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


# NOTE(review): every module-level constant below was machine-mangled to the
# same name `lowerCAmelCase_`, so each assignment shadows the previous one and
# the names referenced later (VOCAB_FILES_NAMES, MUSTC_LANGS, LANGUAGES, ...)
# are unresolved.  Code preserved verbatim; confirm against the upstream file.
lowerCAmelCase_ : Dict = logging.get_logger(__name__)

lowerCAmelCase_ : int = '▁'  # SentencePiece word-boundary marker

lowerCAmelCase_ : Optional[Any] = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}

lowerCAmelCase_ : Optional[int] = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}

lowerCAmelCase_ : List[str] = {
    'facebook/s2t-small-librispeech-asr': 10_24,
}

lowerCAmelCase_ : List[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']

lowerCAmelCase_ : Union[str, Any] = {'mustc': MUSTC_LANGS}


class __SCREAMING_SNAKE_CASE (lowerCamelCase_):
    """Speech2Text tokenizer.

    NOTE(review): the mangling collapsed distinct parameter names to `__a`
    in several signatures below, which makes them duplicate-parameter
    SyntaxErrors as written; bodies also reference the presumable original
    names (`sp_model_kwargs`, `do_upper_case`, `lang_codes`, ...).  Preserved
    verbatim — restore from the upstream source before use.
    """

    __a =VOCAB_FILES_NAMES
    __a =PRETRAINED_VOCAB_FILES_MAP
    __a =MAX_MODEL_INPUT_SIZES
    __a =['input_ids', 'attention_mask']
    __a =[]

    def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any="<s>" , __a : List[str]="</s>" , __a : str="<pad>" , __a : List[str]="<unk>" , __a : Union[str, Any]=False , __a : Any=False , __a : List[str]=None , __a : Optional[int]=None , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
        """Load vocab.json and the SentencePiece model; set up language tokens."""
        _a = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
        _a = do_upper_case
        _a = do_lower_case
        _a = load_json(__a)  # token -> id
        _a = {v: k for k, v in self.encoder.items()}  # id -> token
        _a = spm_file
        _a = load_spm(__a , self.sp_model_kwargs)
        if lang_codes is not None:
            # Register one "<lang:xx>" prefix token per supported language.
            _a = lang_codes
            _a = LANGUAGES[lang_codes]
            _a = [f'<lang:{lang}>' for lang in self.langs]
            _a = {lang: self.sp_model.PieceToId(f'<lang:{lang}>') for lang in self.langs}
            _a = self.lang_tokens
            _a = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            _a = {}

    @property
    def UpperCamelCase__ ( self : str ):
        # Vocabulary size == number of entries in vocab.json.
        return len(self.encoder)

    @property
    def UpperCamelCase__ ( self : str ):
        # Current target language.
        return self._tgt_lang

    @tgt_lang.setter
    def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
        # Switching the target language also switches the forced prefix token.
        _a = new_tgt_lang
        self.set_tgt_lang_special_tokens(__a)

    def UpperCamelCase__ ( self : List[Any] , __a : str ):
        """Set the prefix tokens to the language-code id of ``tgt_lang``."""
        _a = self.lang_code_to_id[tgt_lang]
        _a = [lang_code_id]

    def UpperCamelCase__ ( self : Dict , __a : str ):
        # Tokenize raw text with SentencePiece.
        return self.sp_model.encode(__a , out_type=__a)

    def UpperCamelCase__ ( self : List[str] , __a : Any ):
        # Token -> id, falling back to the unk token's id.
        return self.encoder.get(__a , self.encoder[self.unk_token])

    def UpperCamelCase__ ( self : str , __a : int ):
        # Id -> token, falling back to the unk token.
        return self.decoder.get(__a , self.unk_token)

    def UpperCamelCase__ ( self : str , __a : List[str] ):
        """Join tokens back into a string, decoding special tokens verbatim."""
        _a = []
        _a = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                _a = self.sp_model.decode(__a)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                _a = []
            else:
                current_sub_tokens.append(__a)
        _a = self.sp_model.decode(__a)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def UpperCamelCase__ ( self : int , __a : Any , __a : int=None ):
        """Prefix tokens + sequence(s) + EOS."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]

    def UpperCamelCase__ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
        """1 for special tokens, 0 for sequence tokens.

        NOTE(review): the duplicated `token_ids_a=` keyword below is a
        SyntaxError introduced by mangling (was token_ids_0/token_ids_1).
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a)
        _a = [1] * len(self.prefix_tokens)
        _a = [1]
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__a)) + suffix_ones
        return prefix_ones + ([0] * len(__a)) + ([0] * len(__a)) + suffix_ones

    def UpperCamelCase__ ( self : Union[str, Any] ):
        # Full vocab including tokens added after loading.
        _a = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__( self : Union[str, Any] ):
        # The SentencePiece processor is not picklable — drop it for pickling.
        _a = self.__dict__.copy()
        _a = None
        return state

    def __setstate__( self : str , __a : Dict ):
        _a = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            _a = {}
        _a = load_spm(self.spm_file , self.sp_model_kwargs)

    def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
        """Write vocab.json and copy (or re-serialize) the SentencePiece model."""
        _a = Path(__a)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        _a = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        _a = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , __a)
        if os.path.abspath(self.spm_file) != os.path.abspath(__a) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file , __a)
        elif not os.path.isfile(self.spm_file):
            # No model file on disk — serialize the in-memory model instead.
            with open(__a , "wb") as fi:
                _a = self.sp_model.serialized_model_proto()
                fi.write(__a)
        return (str(__a), str(__a))


def _lowerCamelCase ( lowercase : str , lowercase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from ``spm_file``.

    NOTE(review): duplicate `lowercase` parameters — SyntaxError from mangling.
    """
    _a = sentencepiece.SentencePieceProcessor(**lowercase)
    spm.Load(str(lowercase))
    return spm


def _lowerCamelCase ( lowercase : str ) -> Union[Dict, List]:
    """Read a JSON file."""
    with open(lowercase , "r") as f:
        return json.load(lowercase)


def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> None:
    """Write ``data`` as indented JSON.  (Duplicate params — see NOTE above.)"""
    with open(lowercase , "w") as f:
        json.dump(lowercase , lowercase , indent=2)
346
0
"""Train a stacked-LSTM forecaster on a univariate time series from sample_data.csv.

NOTE: the previous revision was machine-mangled — ``train_x, train_y`` were
collapsed into a single name assigned the tuple ``([], [])`` while the loops
appended to undefined names, and ``model.fit`` referenced ``x_train``/``y_train``
that were never bound.  This restores a coherent, runnable script with the
same computation.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    # Scale to [0, 1] before feeding the LSTM.
    actual_data = MinMaxScaler().fit_transform(actual_data)

    look_back = 10     # input window length
    forward_days = 5   # prediction horizon
    periods = 20       # number of look_back windows held out for testing

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    # Overlap the split by look_back so the first test window has full history.
    test_data = actual_data[division - look_back:]

    # Build sliding (window -> horizon) supervised pairs.
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
125
"""Configuration for a composite encoder-decoder model: wraps one encoder
config and one decoder config resolved through AutoConfig."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Module logger.  NOTE(review): the original annotated this as `List[str]`,
# but `List` is never imported here and module-level annotations are evaluated
# at import time (NameError); the annotation is dropped (annotation-only fix).
__a = logging.get_logger(__name__)


class UpperCAmelCase(a__):
    """Encoder-decoder composite configuration.

    NOTE(review): identifiers are machine-mangled — locals are all
    `lowercase__`, the `**`-parameter is `__lowerCAmelCase` while the body
    reads `kwargs`/`logger`/`output`, and both class attributes share one
    name.  Code preserved verbatim; restore from the upstream source before
    relying on behavior.
    """

    SCREAMING_SNAKE_CASE = "encoder-decoder"  # model_type
    SCREAMING_SNAKE_CASE = True               # is_composition

    def __init__(self, **__lowerCAmelCase) -> int:
        """Build from `encoder` and `decoder` sub-config dicts in kwargs."""
        super().__init__(**__lowerCAmelCase)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        lowercase__ : Optional[int] = kwargs.pop('''encoder''')
        lowercase__ : Union[str, Any] = encoder_config.pop('''model_type''')
        lowercase__ : Any = kwargs.pop('''decoder''')
        lowercase__ : Any = decoder_config.pop('''model_type''')
        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        lowercase__ : Union[str, Any] = AutoConfig.for_model(__lowerCAmelCase, **__lowerCAmelCase)
        lowercase__ : Optional[Any] = AutoConfig.for_model(__lowerCAmelCase, **__lowerCAmelCase)
        lowercase__ : Tuple = True

    @classmethod
    def _lowerCAmelCase(cls, __lowerCAmelCase, __lowerCAmelCase, **__lowerCAmelCase) -> PretrainedConfig:
        """Build from two existing configs, forcing decoder/cross-attention flags.

        NOTE(review): the duplicated parameter name above is a SyntaxError
        introduced by mangling (was encoder_config / decoder_config).
        """
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        lowercase__ : Union[str, Any] = True
        lowercase__ : Any = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **__lowerCAmelCase)

    def _lowerCAmelCase(self) -> Any:
        """Serialize to a dict, expanding the nested encoder/decoder configs."""
        lowercase__ : Any = copy.deepcopy(self.__dict__)
        lowercase__ : Optional[Any] = self.encoder.to_dict()
        lowercase__ : Tuple = self.decoder.to_dict()
        lowercase__ : Dict = self.__class__.model_type
        return output
198
0
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def __a ( __lowerCamelCase ): def wrapper(*__lowerCamelCase, **__lowerCamelCase ): UpperCAmelCase_ : Dict = timeit.default_timer() UpperCAmelCase_ : Any = func(*__lowerCamelCase, **__lowerCamelCase ) UpperCAmelCase_ : Dict = timeit.default_timer() - starttime return delta UpperCAmelCase_ : Dict = func.__name__ return wrapper def __a ( __lowerCamelCase, __lowerCamelCase=100, __lowerCamelCase=None ): UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : List[str] = seq_shapes or {} for i in range(__lowerCamelCase ): UpperCAmelCase_ : List[str] = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(__lowerCamelCase, _ArrayXD ): UpperCAmelCase_ : int = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(__lowerCamelCase, datasets.Value ): if v.dtype == "string": UpperCAmelCase_ : Union[str, Any] = "The small grey turtle was surprisingly fast when challenged." 
else: UpperCAmelCase_ : Dict = np.random.randint(10, size=1 ).astype(v.dtype ).item() elif isinstance(__lowerCamelCase, datasets.Sequence ): while isinstance(__lowerCamelCase, datasets.Sequence ): UpperCAmelCase_ : Dict = v.feature UpperCAmelCase_ : str = seq_shapes[k] UpperCAmelCase_ : List[Any] = np.random.rand(*__lowerCamelCase ).astype(v.dtype ) UpperCAmelCase_ : Optional[Any] = data dummy_data.append((i, example) ) return dummy_data def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=100, __lowerCamelCase=None ): UpperCAmelCase_ : List[Any] = generate_examples(__lowerCamelCase, num_examples=__lowerCamelCase, seq_shapes=__lowerCamelCase ) with ArrowWriter(features=__lowerCamelCase, path=__lowerCamelCase ) as writer: for key, record in dummy_data: UpperCAmelCase_ : Optional[int] = features.encode_example(__lowerCamelCase ) writer.write(__lowerCamelCase ) UpperCAmelCase_ : Dict = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" ) UpperCAmelCase_ : Union[str, Any] = datasets.Dataset.from_file(filename=__lowerCamelCase, info=datasets.DatasetInfo(features=__lowerCamelCase ) ) return dataset
354
"""DETR model configuration (restored from obfuscated source).

The obfuscated original bound both the logger and the URL map to `_a`,
every class attribute to `SCREAMING_SNAKE_CASE__`, every method to
`UpperCamelCase__`, inherited from the undefined name `lowercase__`, and
assigned all `__init__` arguments to discarded locals instead of `self` —
the module could not even be imported.  Canonical names are restored;
`A_` is kept as an alias for backward compatibility.
"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
    # See all DETR models at https://huggingface.co/models?filter=detr
}
# Backward-compatible alias: the obfuscated module's last binding of `_a`.
_a = DETR_PRETRAINED_CONFIG_ARCHIVE_MAP


class DetrConfig(PretrainedConfig):
    """Configuration for a DETR (DEtection TRansformer) model.

    Instantiating a configuration with the defaults yields a configuration
    similar to `facebook/detr-resnet-50`.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # DETR reports the encoder depth as its hidden-layer count.
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Alternate constructor from a pre-built backbone configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config.

        The obfuscated original assigned the serialized sub-config and the
        model type to throwaway locals instead of into `output`.
        """
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12


# Backward-compatible alias: the obfuscated module's last binding of `A_`.
A_ = DetrOnnxConfig
23
0
"""Convert Swin + UperNet mmsegmentation checkpoints to the HF format.

Restored from obfuscated source: every local was bound to the single name
`__lowerCamelCase` while call sites referenced the function parameter
`_lowerCamelCase`, so the script could not run.  Variable names below
follow the data flow visible in the original statements.
"""
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build an UperNetConfig whose Swin backbone matches *model_name*."""
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information (ADE20k semantic segmentation, 150 classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def create_rename_keys(config):
    """List (old, new) state-dict key pairs for the mmseg -> HF rename."""
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Move the value stored under *old* to the key *new*."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused mmseg qkv projection into separate q/k/v tensors."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def correct_unfold_reduction_order(x):
    """Reorder a patch-merging reduction weight from mmseg to HF layout."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    """Inverse of `correct_unfold_reduction_order`."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    """Reorder a patch-merging norm vector from mmseg to HF layout."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    """Inverse of `correct_unfold_norm_order`."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmseg checkpoint, convert it, verify logits, and save."""
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(f"""openmmlab/{model_name}""")
        processor.push_to_hub(f"""openmmlab/{model_name}""")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='upernet-swin-tiny',
        type=str,
        choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
        help='Name of the Swin + UperNet model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
135
"""Tests for `KDPMaDiscreteScheduler` (restored from obfuscated source).

The obfuscated original bound both class attributes to `snake_case__` and
every method to `lowerCamelCase__` (later definitions silently replaced
earlier ones), inherited from the undefined name `a__`, and assigned every
intermediate to a throwaway local (e.g. `config` was read but never
defined).  Conventional attribute/method names are restored; the class
name `_snake_case` is kept as the module's public name.
"""
import torch

from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class _snake_case(SchedulerCommonTest):
    # Scheduler implementation(s) under test and the step count shared by
    # the full-loop tests below.
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
135
1
"""Compute the missing quantity in the shear-stress relation.

Restored from obfuscated source, whose three parameters were all named
`__lowerCamelCase` — a SyntaxError (duplicate argument) — while the body
referenced `stress`, `tangential_force` and `area`, which therefore never
existed.
"""
from __future__ import annotations

# Exported explicitly because the (obfuscated) public name starts with an
# underscore and would otherwise be hidden from `import *`.
__all__ = ["_UpperCAmelCase"]


def _UpperCAmelCase(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve ``stress = tangential_force / area`` for the single unknown.

    Exactly one of the three arguments must be 0, marking the unknown;
    the name of that quantity and its computed value are returned.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is
            negative.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif stress < 0:
        raise ValueError('Stress cannot be negative')
    elif tangential_force < 0:
        raise ValueError('Tangential Force cannot be negative')
    elif area < 0:
        raise ValueError('Area cannot be negative')
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
350
"""Fast BERT tokenizer (restored from obfuscated source).

The obfuscated original bound five distinct constants to `UpperCAmelCase__`,
all class attributes to `__a`, three different methods to `lowercase`, and
inherited from the undefined name `A_`; intermediates were assigned to the
throwaway `_snake_case`, leaving e.g. `normalizer_state` undefined.  This
restores the canonical structure.  Since every entry of the per-model URL
tables follows the same URL pattern, they are generated from one model map.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# model id -> do_lower_case; entry order matches the original tables.
_MODEL_DO_LOWER_CASE = {
    'bert-base-uncased': True,
    'bert-large-uncased': True,
    'bert-base-cased': False,
    'bert-large-cased': False,
    'bert-base-multilingual-uncased': True,
    'bert-base-multilingual-cased': False,
    'bert-base-chinese': False,
    'bert-base-german-cased': False,
    'bert-large-uncased-whole-word-masking': True,
    'bert-large-cased-whole-word-masking': False,
    'bert-large-uncased-whole-word-masking-finetuned-squad': True,
    'bert-large-cased-whole-word-masking-finetuned-squad': False,
    'bert-base-cased-finetuned-mrpc': False,
    'bert-base-german-dbmdz-cased': False,
    'bert-base-german-dbmdz-uncased': True,
    'TurkuNLP/bert-base-finnish-cased-v1': False,
    'TurkuNLP/bert-base-finnish-uncased-v1': True,
    'wietsedv/bert-base-dutch-cased': False,
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        model: f'https://huggingface.co/{model}/resolve/main/vocab.txt' for model in _MODEL_DO_LOWER_CASE
    },
    'tokenizer_file': {
        model: f'https://huggingface.co/{model}/resolve/main/tokenizer.json' for model in _MODEL_DO_LOWER_CASE
    },
}

# All listed checkpoints share a 512-position embedding table.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {model: 512 for model in _MODEL_DO_LOWER_CASE}

PRETRAINED_INIT_CONFIGURATION = {
    model: {'do_lower_case': lowercase} for model, lowercase in _MODEL_DO_LOWER_CASE.items()
}

# Backward-compatible alias: the obfuscated module's last binding of
# `UpperCAmelCase__` was the init-configuration table.
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION


class BertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" BERT tokenizer backed by the HuggingFace *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer when its serialized options
        # disagree with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` input ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0s for sequence A (incl. specials), 1s for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)


# Backward-compatible alias for the obfuscated class name.
lowerCAmelCase__ = BertTokenizerFast
40
0
from __future__ import annotations

from typing import Any


class Matrix:
    """A minimal dense matrix supporting +, -, *, transpose and the
    Sherman–Morrison inverse-update formula.

    Review fix: the class was declared under a placeholder name while every
    internal constructor call, isinstance check and the demo referenced
    ``Matrix``; several locals (``s``, ``result``, ``v_t`` …) were read but
    never bound. Names restored so reads match bindings.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Width of the widest element, for column alignment.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """True iff `loc` is a 2-element (row, col) pair inside the matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Return (A + u v^T)^(-1) given that ``self`` is A^(-1).

        Returns None when 1 + v^T A^(-1) u == 0 (the update is not invertible).
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1): start from the identity.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
329
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    """Step `scheduler` `num_steps` times, returning the lr observed at each step."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like `unwrap_schedule`, but save/reload the scheduler state halfway
    through to verify that its state_dict round-trips."""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.detach_()
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        # NOTE(review): non-default Adafactor kwargs restored from the standard
        # configuration for this test (beta1=None, relative_step/scale_parameter/
        # warmup_init=False) — the obfuscated source had lost the values.
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.detach_()
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Picklable wrapper around a schedule lambda (plain lambdas can't be pickled)."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
329
1
"""Configuration and ONNX-export configs for vision-encoder-decoder models."""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    """Composite config holding an image-encoder config and a text-decoder config.

    Requires both `encoder` and `decoder` sub-configuration dicts in kwargs.
    """

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Rebuild the concrete sub-configs from their serialized model types.
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Build a composite config from two sub-configs, marking the decoder."""
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs to dicts."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ):
        """Create dummy decoder inputs; encoder_hidden_states is a zero tensor
        shaped from the dummy input_ids and the configured encoder hidden size."""
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ):
        # The decoder export needs to know the encoder's hidden size.
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
320
"""Tests for the Flax Pegasus model."""
import unittest

import numpy as np

from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel


@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        # Force every sequence to end with EOS.
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the attention mask out to the full cache length.
        attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the standard input dict, deriving masks from the pad token if absent."""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]

        tgt_text = [
            """California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
            """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
320
1
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(_snake_case ) class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , **_UpperCamelCase ) -> Optional[int]: super().__init__(**_UpperCamelCase ) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) requires_backends(self , 'vision' ) self.check_model_type(_UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> Any: if "text_queries" in kwargs: UpperCAmelCase_ : Tuple = kwargs.pop('text_queries' ) if isinstance(_UpperCamelCase , (str, Image.Image) ): UpperCAmelCase_ : Optional[Any] = {'image': image, 'candidate_labels': candidate_labels} else: UpperCAmelCase_ : Union[str, Any] = image UpperCAmelCase_ : Tuple = super().__call__(_UpperCamelCase , **_UpperCamelCase ) return results def __UpperCAmelCase ( self , **_UpperCamelCase ) -> str: UpperCAmelCase_ : Tuple = {} if "threshold" in kwargs: UpperCAmelCase_ : str = kwargs['threshold'] if "top_k" in kwargs: UpperCAmelCase_ : str = kwargs['top_k'] return {}, {}, postprocess_params def __UpperCAmelCase ( self , _UpperCamelCase ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = load_image(inputs['image'] ) UpperCAmelCase_ : List[Any] = inputs['candidate_labels'] if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Optional[Any] = candidate_labels.split(',' ) UpperCAmelCase_ : Optional[int] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) 
for i, candidate_label in enumerate(_UpperCamelCase ): UpperCAmelCase_ : Tuple = self.tokenizer(_UpperCamelCase , return_tensors=self.framework ) UpperCAmelCase_ : Union[str, Any] = self.image_processor(_UpperCamelCase , return_tensors=self.framework ) yield { "is_last": i == len(_UpperCamelCase ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]: UpperCAmelCase_ : List[str] = model_inputs.pop('target_size' ) UpperCAmelCase_ : Dict = model_inputs.pop('candidate_label' ) UpperCAmelCase_ : Any = model_inputs.pop('is_last' ) UpperCAmelCase_ : Tuple = self.model(**_UpperCamelCase ) UpperCAmelCase_ : Any = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs} return model_outputs def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0.1 , _UpperCamelCase=None ) -> Tuple: UpperCAmelCase_ : Optional[Any] = [] for model_output in model_outputs: UpperCAmelCase_ : Optional[Any] = model_output['candidate_label'] UpperCAmelCase_ : List[str] = BaseModelOutput(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = self.image_processor.post_process_object_detection( outputs=_UpperCamelCase , threshold=_UpperCamelCase , target_sizes=model_output['target_size'] )[0] for index in outputs["scores"].nonzero(): UpperCAmelCase_ : List[Any] = outputs['scores'][index].item() UpperCAmelCase_ : Dict = self._get_bounding_box(outputs['boxes'][index][0] ) UpperCAmelCase_ : List[Any] = {'score': score, 'label': label, 'box': box} results.append(_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x["score"] , reverse=_UpperCamelCase ) if top_k: UpperCAmelCase_ : List[str] = results[:top_k] return results def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict[str, int]: if self.framework != "pt": raise ValueError('The ZeroShotObjectDetectionPipeline is only available in 
PyTorch.' ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = box.int().tolist() UpperCAmelCase_ : Optional[Any] = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
29
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets( a__ , a__ , number=a__ , min_len=1_026 , trim=a__ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # load pretrained model SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ ) print('''computing perplexity on objective set''' ) SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item() print('''perplexity on objective set:''' , a__ ) # collect igf pairs and save to file demo.jbl collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ): """simple docstring""" set_seed(42 ) # Load pre-trained model SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) # Initialize secondary learner to use embedding weights of model SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ ) # Train secondary learner SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner( a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , ) del model, 
secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ ) SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ ) SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1 SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ ) model.train() if secondary_learner is not None: secondary_learner.to(a__ ) secondary_learner.eval() SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Tuple = [] # Compute the performance of the transformer model at the beginning SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) for epoch in range(int(a__ ) ): for step, example in enumerate(a__ ): torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 ) SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = True if secondary_learner is not None: SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward( torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(a__ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary 
learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: SCREAMING_SNAKE_CASE : str = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) SCREAMING_SNAKE_CASE : List[str] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , a__ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def UpperCAmelCase_( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' ) # Required parameters parser.add_argument( '''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. 
Should contain data files for WikiText.''' , ) parser.add_argument( '''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--data_file''' , type=a__ , default=a__ , help=( '''A jbl file containing tokenized data which can be split as objective dataset, ''' '''train_dataset and test_dataset.''' ) , ) parser.add_argument( '''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , ) parser.add_argument( '''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , ) parser.add_argument( '''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' ) parser.add_argument( '''--context_len''' , default=32 , type=a__ , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , ) parser.add_argument( '''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' ) parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' ) parser.add_argument( '''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , ) parser.add_argument( '''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' ) parser.add_argument( '''--eval_interval''' , default=10 , type=a__ , help=( '''decay the selectivity of our secondary learner filter from''' '''1 standard deviation above average to 1 below average after 10 batches''' ) , ) parser.add_argument( '''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' ) parser.add_argument( '''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' ) parser.add_argument( '''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' ) parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' ) parser.add_argument( '''--threshold''' , default=1.0 , type=a__ , help=( '''The threshold value used by secondary learner to filter the train_data and allow only''' ''' informative data as input to the model''' ) , ) parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' ) parser.add_argument( '''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the 
original pretrained GPT-2 weights after each iteration''' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , ) # Load train data for secondary learner SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' ) # Train secondary learner SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner( a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , ) # load pretrained gpt2 model SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets( context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , ) if __name__ == "__main__": main()
313
0
"""simple docstring""" import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class lowerCamelCase__ ( unittest.TestCase ): def __init__( self ,A ,A=7 ,A=3 ,A=18 ,A=30 ,A=400 ,A=True ,A=None ,A=True ,A=[0.5, 0.5, 0.5] ,A=[0.5, 0.5, 0.5] ,): UpperCAmelCase = size if size is not None else {"""height""": 18, """width""": 18} UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = num_channels UpperCAmelCase = image_size UpperCAmelCase = min_resolution UpperCAmelCase = max_resolution UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_normalize UpperCAmelCase = image_mean UpperCAmelCase = image_std def _UpperCamelCase ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowerCamelCase__ ( snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = DPTImageProcessor if is_vision_available() else None def _UpperCamelCase ( self ): UpperCAmelCase = DPTImageProcessingTester(self ) @property def _UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase ( self ): UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"""image_mean""" ) ) self.assertTrue(hasattr(A ,"""image_std""" ) ) self.assertTrue(hasattr(A ,"""do_normalize""" ) ) self.assertTrue(hasattr(A ,"""do_resize""" ) ) self.assertTrue(hasattr(A ,"""size""" ) ) def _UpperCamelCase ( self ): UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) 
self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} ) UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} ) def _UpperCamelCase ( self ): # Initialize image_processing UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def _UpperCamelCase ( self ): # Initialize image_processing UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def _UpperCamelCase ( self ): # Initialize image_processing UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,)
234
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["""MBartTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["""MBartTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ """MBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """MBartForCausalLM""", """MBartForConditionalGeneration""", """MBartForQuestionAnswering""", """MBartForSequenceClassification""", """MBartModel""", """MBartPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ """TFMBartForConditionalGeneration""", """TFMBartModel""", """TFMBartPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ """FlaxMBartForConditionalGeneration""", """FlaxMBartForQuestionAnswering""", """FlaxMBartForSequenceClassification""", """FlaxMBartModel""", """FlaxMBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
234
1
import math def __UpperCamelCase ( _A ): return math.sqrt(_A ) * math.sqrt(_A ) == num def __UpperCamelCase ( _A ): lowerCAmelCase_ = 0 lowerCAmelCase_ = n while left <= right: lowerCAmelCase_ = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: lowerCAmelCase_ = mid - 1 else: lowerCAmelCase_ = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
278
import argparse from collections import defaultdict import yaml _A = '''docs/source/en/_toctree.yml''' def __UpperCamelCase ( _A ): lowerCAmelCase_ = defaultdict(_A ) for doc in model_doc: counts[doc["local"]] += 1 lowerCAmelCase_ = [key for key, value in counts.items() if value > 1] lowerCAmelCase_ = [] for duplicate_key in duplicates: lowerCAmelCase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(_A ) > 1: raise ValueError( f"{duplicate_key} is present several times in the documentation table of content at " '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(_A , key=lambda _A : s["title"].lower() ) def __UpperCamelCase ( _A=False ): with open(_A , encoding='''utf-8''' ) as f: lowerCAmelCase_ = yaml.safe_load(f.read() ) # Get to the API doc lowerCAmelCase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowerCAmelCase_ = content[api_idx]['''sections'''] # Then to the model doc lowerCAmelCase_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowerCAmelCase_ = api_doc[model_idx]['''sections'''] lowerCAmelCase_ = [(idx, section) for idx, section in enumerate(_A ) if '''sections''' in section] lowerCAmelCase_ = False for idx, modality_doc in modalities_docs: lowerCAmelCase_ = modality_doc['''sections'''] lowerCAmelCase_ = clean_model_doc_toc(_A ) if old_modality_doc != new_modality_doc: lowerCAmelCase_ = True if overwrite: lowerCAmelCase_ = new_modality_doc if diff: if overwrite: lowerCAmelCase_ = model_doc lowerCAmelCase_ = api_doc with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(_A , allow_unicode=_A ) ) else: raise ValueError( '''The model doc part of the table of content is not properly 
sorted, run `make style` to fix this.''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _A = parser.parse_args() check_model_doc(args.fix_and_overwrite)
278
1
import warnings warnings.warn( '''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: ''' '''`from accelerate import find_executable_batch_size` to avoid this warning.''', FutureWarning, )
362
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def lowerCAmelCase__ ( a__: str , a__: List[Any] ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ).convert('RGB' ) _UpperCAmelCase = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ), ] ) _UpperCAmelCase = transform(a__ ).unsqueeze(0 ).to(a__ ) return image def lowerCAmelCase__ ( a__: Optional[int] ) -> int: '''simple docstring''' if "visual_encoder" in key: _UpperCAmelCase = re.sub('visual_encoder*' , 'vision_model.encoder' , a__ ) if "blocks" in key: _UpperCAmelCase = re.sub(R'blocks' , 'layers' , a__ ) if "attn" in key: _UpperCAmelCase = re.sub(R'attn' , 'self_attn' , a__ ) if "norm1" in key: _UpperCAmelCase = re.sub(R'norm1' , 'layer_norm1' , a__ ) if "norm2" in key: _UpperCAmelCase = re.sub(R'norm2' , 'layer_norm2' , a__ ) if "encoder.norm" in key: _UpperCAmelCase = re.sub(R'encoder.norm' , 'post_layernorm' , a__ ) if "encoder.patch_embed.proj" in key: _UpperCAmelCase = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , a__ ) if "encoder.pos_embed" in key: _UpperCAmelCase = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , a__ ) if "encoder.cls_token" in key: _UpperCAmelCase = re.sub(R'encoder.cls_token' , 
'embeddings.class_embedding' , a__ ) if "self_attn" in key: _UpperCAmelCase = re.sub(R'self_attn.proj' , 'self_attn.projection' , a__ ) return key @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[Any] , a__: List[str]=None ) -> Optional[Any]: '''simple docstring''' if config_path is not None: _UpperCAmelCase = BlipConfig.from_pretrained(a__ ) else: _UpperCAmelCase = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} ) _UpperCAmelCase = BlipForConditionalGeneration(a__ ).eval() _UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' _UpperCAmelCase = blip_decoder(pretrained=a__ , image_size=3_8_4 , vit='base' ) _UpperCAmelCase = pt_model.eval() _UpperCAmelCase = pt_model.state_dict() for key in modified_state_dict.copy(): _UpperCAmelCase = modified_state_dict.pop(a__ ) _UpperCAmelCase = rename_key(a__ ) _UpperCAmelCase = value hf_model.load_state_dict(a__ ) _UpperCAmelCase = 3_8_4 _UpperCAmelCase = load_demo_image(image_size=a__ , device='cpu' ) _UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' ) _UpperCAmelCase = tokenizer(['a picture of'] ).input_ids _UpperCAmelCase = hf_model.generate(a__ , a__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] _UpperCAmelCase = hf_model.generate(a__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(a__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' _UpperCAmelCase = ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) _UpperCAmelCase = blip_vqa(pretrained=a__ , image_size=a__ , vit='base' ) vqa_model.eval() _UpperCAmelCase = vqa_model.state_dict() for key in 
modified_state_dict.copy(): _UpperCAmelCase = modified_state_dict.pop(a__ ) _UpperCAmelCase = rename_key(a__ ) _UpperCAmelCase = value _UpperCAmelCase = BlipForQuestionAnswering(a__ ) hf_vqa_model.load_state_dict(a__ ) _UpperCAmelCase = ['How many dogs are in this image?'] _UpperCAmelCase = tokenizer(a__ , return_tensors='pt' ).input_ids _UpperCAmelCase = hf_vqa_model.generate(a__ , a__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) _UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' _UpperCAmelCase = blip_itm(pretrained=a__ , image_size=a__ , vit='base' ) itm_model.eval() _UpperCAmelCase = itm_model.state_dict() for key in modified_state_dict.copy(): _UpperCAmelCase = modified_state_dict.pop(a__ ) _UpperCAmelCase = rename_key(a__ ) _UpperCAmelCase = value _UpperCAmelCase = BlipForImageTextRetrieval(a__ ) _UpperCAmelCase = ['A picture of a woman with a dog sitting in a beach'] _UpperCAmelCase = tokenizer( a__ , return_tensors='pt' , padding='max_length' , truncation=a__ , max_length=3_5 , ).input_ids hf_itm_model.load_state_dict(a__ ) hf_itm_model.eval() _UpperCAmelCase = hf_itm_model(a__ , a__ , use_itm_head=a__ ) _UpperCAmelCase = hf_itm_model(a__ , a__ , use_itm_head=a__ ) assert out[0].item() == 0.2_110_687_494_277_954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": lowerCAmelCase__ :int = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') 
lowerCAmelCase__ :List[Any] = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
185
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    """Configuration for a LeViT model.

    Stores the hyper-parameters of the convolutional patch embedding
    (``kernel_size`` / ``stride`` / ``padding``) and of the three
    transformer stages (``hidden_sizes`` / ``num_attention_heads`` /
    ``depths`` / ``key_dim``), plus the "Subsample" attention operations
    inserted between stages (``down_ops``).
    """

    model_type = "levit"

    def __init__(
        self,
        image_size=224,  # input resolution (square)
        num_channels=3,  # RGB input
        kernel_size=3,  # patch-embedding conv kernel
        stride=2,  # patch-embedding conv stride
        padding=1,  # patch-embedding conv padding
        patch_size=16,  # overall spatial reduction of the embedding
        hidden_sizes=[128, 256, 384],  # embedding dim of each stage
        num_attention_heads=[4, 8, 12],  # attention heads per stage
        depths=[4, 4, 4],  # transformer blocks per stage
        key_dim=[16, 16, 16],  # key dimension per stage
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Downsampling attention ("Subsample") ops between the stages.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    """ONNX export configuration for LeViT."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single NCHW image input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the exported model against PyTorch.
        return 1e-4
348
from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case : def __init__( self , snake_case__ , snake_case__=12 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , snake_case__=0 , snake_case__=None , ) -> Tuple: '''simple docstring''' UpperCAmelCase : List[Any] =parent UpperCAmelCase : Optional[int] =batch_size UpperCAmelCase : List[Any] =seq_length UpperCAmelCase : Optional[int] =is_training UpperCAmelCase : Union[str, Any] =use_input_mask UpperCAmelCase : Tuple =use_labels UpperCAmelCase : Union[str, Any] =vocab_size UpperCAmelCase : Tuple =hidden_size UpperCAmelCase : Dict =projection_dim UpperCAmelCase : Optional[int] =num_hidden_layers UpperCAmelCase : Dict =num_attention_heads UpperCAmelCase : int =intermediate_size UpperCAmelCase : Any =dropout UpperCAmelCase : Union[str, Any] =attention_dropout UpperCAmelCase : Union[str, Any] =max_position_embeddings UpperCAmelCase : List[str] =initializer_range UpperCAmelCase : str =scope UpperCAmelCase : str =bos_token_id def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : int =None if self.use_input_mask: UpperCAmelCase : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] ) if input_mask 
is not None: UpperCAmelCase : Optional[int] =input_mask.numpy() UpperCAmelCase , UpperCAmelCase : List[Any] =input_mask.shape UpperCAmelCase : Optional[Any] =np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case__ ): UpperCAmelCase : List[Any] =1 UpperCAmelCase : Tuple =0 UpperCAmelCase : List[Any] =self.get_config() return config, input_ids, tf.convert_to_tensor(snake_case__ ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Dict: '''simple docstring''' UpperCAmelCase : Tuple =TFBlipTextModel(config=snake_case__ ) UpperCAmelCase : List[Any] =model(snake_case__ , attention_mask=snake_case__ , training=snake_case__ ) UpperCAmelCase : str =model(snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[str] =self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] =config_and_inputs UpperCAmelCase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __snake_case ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : Optional[int] = (TFBlipTextModel,) if is_tf_available() else () 
__lowerCamelCase : Dict = False __lowerCamelCase : Optional[Any] = False __lowerCamelCase : Dict = False def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : str =BlipTextModelTester(self ) UpperCAmelCase : Optional[int] =ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' pass @slow def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] =TFBlipTextModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def UpperCAmelCase__ ( self , snake_case__=True ) -> Any: '''simple docstring''' super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case__ )
348
1
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BLOOM tokenizer loaded from a serialized
    ``tokenizer.json``; there is no slow counterpart."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None  # BLOOM ships no slow tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if the requested
        # `add_prefix_space` differs from the serialized tokenizer's setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        # Pre-tokenized input only works when a prefix space is added.
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, keeping only the trailing
        `model_max_length` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
307
import os
import string
import sys


# Flag OR-ed into arrow key codes so they don't collide with ASCII.
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# Arrow codes form the contiguous range [up, left].
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Read one raw character from the keyboard (platform specific)."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Switch the terminal to raw mode for a single unbuffered read.
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Read a key press and translate escape sequences via KEYMAP."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
307
1
"""simple docstring""" import logging import os from .state import PartialState class A__ ( logging.LoggerAdapter ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE: Any) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Tuple = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , *_SCREAMING_SNAKE_CASE: Tuple , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Dict: """simple docstring""" if PartialState._shared_state == {}: raise RuntimeError( "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.") __lowerCAmelCase : int = kwargs.pop("main_process_only" , _SCREAMING_SNAKE_CASE) __lowerCAmelCase : str = kwargs.pop("in_order" , _SCREAMING_SNAKE_CASE) if self.isEnabledFor(_SCREAMING_SNAKE_CASE): if self._should_log(_SCREAMING_SNAKE_CASE): __lowerCAmelCase , __lowerCAmelCase : Tuple = self.process(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) self.logger.log(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) elif in_order: __lowerCAmelCase : List[Any] = PartialState() for i in range(state.num_processes): if i == state.process_index: __lowerCAmelCase , __lowerCAmelCase : str = self.process(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) self.logger.log(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) state.wait_for_everyone() def _lowercase ( __snake_case ,__snake_case = None ) -> Tuple: if log_level is None: __lowerCAmelCase : Optional[Any] = os.environ.get("ACCELERATE_LOG_LEVEL" ,__snake_case ) __lowerCAmelCase : Tuple = logging.getLogger(__snake_case ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(__snake_case ,{} )
269
"""simple docstring""" from __future__ import annotations def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> list: __lowerCAmelCase : Dict = [] __lowerCAmelCase , __lowerCAmelCase : Any = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) __lowerCAmelCase : int = result + left + right return input_list def _lowercase ( __snake_case ) -> list: if len(__snake_case ) <= 1: return input_list __lowerCAmelCase : int = list(__snake_case ) # iteration for two-way merging __lowerCAmelCase : Optional[int] = 2 while p <= len(__snake_case ): # getting low, high and middle value for merge-sort of single list for i in range(0 ,len(__snake_case ) ,__snake_case ): __lowerCAmelCase : Union[str, Any] = i __lowerCAmelCase : Tuple = i + p - 1 __lowerCAmelCase : Optional[Any] = (low + high + 1) // 2 __lowerCAmelCase : Any = merge(__snake_case ,__snake_case ,__snake_case ,__snake_case ) # final merge of last two parts if p * 2 >= len(__snake_case ): __lowerCAmelCase : Optional[Any] = i __lowerCAmelCase : Union[str, Any] = merge(__snake_case ,0 ,__snake_case ,len(__snake_case ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": __snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip() if user_input == "": __snake_case : Optional[int] = [] else: __snake_case : int = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
269
1
def lowercase__(input_str: str) -> bool:
    """Return True if every character of `input_str` is unique.

    Uses an arbitrary-precision int as a bitmap indexed by code point;
    the empty string is vacuously unique. (Name kept for compatibility
    with the existing obfuscated identifier.)

    >>> lowercase__("abcde")
    True
    >>> lowercase__("hello")
    False
    """
    bitmap = 0  # bit i set  <=>  code point i already seen
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
371
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure: config + slow tokenizer are always importable.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
183
0
def gray_code_sequence(bit_count: int) -> list:
    """Return the Gray-code sequence for `bit_count` bits as integers.

    >>> gray_code_sequence(2)
    [0, 1, 3, 2]

    Raises ValueError for negative input.
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the Gray-code sequence for `bit_count` bits as binary strings
    (reflect-and-prefix recursion)."""
    # Base cases: 0 bits -> ["0"], 1 bit -> ["0", "1"]
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # 2 ** bit_count entries
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
229
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure: config + slow tokenizer are always importable.
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]


if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
23
0
"""simple docstring""" import math import sys import cva import numpy as np def __lowercase ( _a , _a ): # For applying gaussian function for each element in matrix. snake_case_ : List[Any] = math.sqrt(_a ) snake_case_ : Optional[Any] = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def __lowercase ( _a , _a , _a , _a ): snake_case_ : str = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def __lowercase ( _a , _a ): # Creates a gaussian kernel of given dimension. snake_case_ : Optional[Any] = np.zeros((kernel_size, kernel_size) ) for i in range(0 , _a ): for j in range(0 , _a ): snake_case_ : List[Any] = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(_a , _a ) def __lowercase ( _a , _a , _a , _a , ): snake_case_ : List[str] = np.zeros(img.shape ) snake_case_ : Optional[Any] = get_gauss_kernel(_a , _a ) snake_case_, snake_case_ : int = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): snake_case_ : Tuple = get_slice(_a , _a , _a , _a ) snake_case_ : int = img_s - img_s[kernel_size // 2, kernel_size // 2] snake_case_ : Optional[int] = vec_gaussian(_a , _a ) snake_case_ : List[Any] = np.multiply(_a , _a ) snake_case_ : List[str] = np.multiply(_a , _a ) snake_case_ : List[str] = np.sum(_a ) / np.sum(_a ) snake_case_ : Tuple = val return imga def __lowercase ( _a ): snake_case_ : int = args[1] if args[1:] else '''../image_data/lena.jpg''' snake_case_ : List[str] = float(args[2] ) if args[2:] else 1.0 snake_case_ : List[str] = float(args[3] ) if args[3:] else 1.0 if args[4:]: snake_case_ : Optional[int] = int(args[4] ) snake_case_ : Dict = kernel_size + abs(kernel_size % 2 - 1 ) else: snake_case_ : List[Any] = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ : 
Optional[Any] = parse_args(sys.argv) lowercase__ : Dict = cva.imread(filename, 0) cva.imshow('''input image''', img) lowercase__ : List[Any] = img / 2_55 lowercase__ : int = out.astype('''float32''') lowercase__ : Any = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) lowercase__ : int = out * 2_55 lowercase__ : Any = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
155
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase__ : Any = None lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : Tuple = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowercase__ : Union[str, Any] = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } lowercase__ : Any = { '''google/rembert''': 2_56, } lowercase__ : Optional[Any] = '''▁''' class _UpperCAmelCase ( lowerCAmelCase__): _lowerCAmelCase : Tuple = VOCAB_FILES_NAMES _lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase : Tuple = RemBertTokenizer def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : List[Any]=True , lowercase_ : str=True , lowercase_ : Optional[int]=False , lowercase_ : List[Any]="[CLS]" , lowercase_ : Union[str, Any]="[SEP]" , lowercase_ : str="<unk>" , lowercase_ : Tuple="[SEP]" , lowercase_ : Optional[int]="<pad>" , lowercase_ : List[Any]="[CLS]" , lowercase_ : Union[str, Any]="[MASK]" , **lowercase_ : Dict , ): # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token super().__init__( lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , ) snake_case_ : Optional[int] = do_lower_case snake_case_ : List[Any] = remove_space snake_case_ : str = keep_accents snake_case_ : str = vocab_file snake_case_ : Optional[int] = False if not self.vocab_file else True def _snake_case ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ): snake_case_ : Optional[int] = [self.sep_token_id] snake_case_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _snake_case ( self : str , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1] return [1] + ([0] * len(lowercase_ )) + [1] def _snake_case ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ): snake_case_ : Union[str, Any] = [self.sep_token_id] snake_case_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _snake_case ( self : Optional[int] , lowercase_ : str , lowercase_ : 
Optional[str] = None ): if not os.path.isdir(lowercase_ ): logger.error('''Vocabulary path ({}) should be a directory'''.format(lowercase_ ) ) return snake_case_ : Optional[int] = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file , lowercase_ ) return (out_vocab_file,)
155
1
"""simple docstring""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Optional[int] = torch.load(_lowercase, map_location="""cpu""" ) snake_case_ :Any = chkpt["""model"""] # We have the base model one level deeper than the original XLM repository snake_case_ :Dict = {} for k, v in state_dict.items(): if "pred_layer" in k: snake_case_ :Optional[Any] = v else: snake_case_ :List[str] = v snake_case_ :List[Any] = chkpt["""params"""] snake_case_ :str = {n: v for n, v in config.items() if not isinstance(_lowercase, (torch.FloatTensor, numpy.ndarray) )} snake_case_ :List[Any] = chkpt["""dico_word2id"""] snake_case_ :Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""", """""" ): i for s, i in vocab.items()} # Save pytorch-model snake_case_ :Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME snake_case_ :List[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME snake_case_ :Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""] print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(_lowercase, _lowercase ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(_lowercase, """w""", encoding="""utf-8""" ) as f: f.write(json.dumps(_lowercase, indent=2 ) + """\n""" ) print(f"""Save vocab file to {pytorch_config_dump_path}""" ) with open(_lowercase, """w""", encoding="""utf-8""" ) as f: f.write(json.dumps(_lowercase, indent=2 ) + """\n""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." 
) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __a = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
66
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
0
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( a__ : Any , a__ : Optional[int] ) -> int: assert isinstance(a__ , a__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase__ ( a__ : Optional[Any] , a__ : Any , a__ : Union[str, Any] ) -> Optional[Any]: UpperCamelCase_ = tmp_path / """cache""" UpperCamelCase_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase_ = ParquetDatasetReader(a__ , cache_dir=a__ , keep_in_memory=a__ ).read() _check_parquet_dataset(a__ , a__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase__ ( a__ : Optional[int] , a__ : Any , a__ : Optional[Any] ) -> Any: UpperCamelCase_ = tmp_path / """cache""" UpperCamelCase_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase_ = features.copy() if features else default_expected_features UpperCamelCase_ = ( Features({feature: Value(a__ ) for feature, dtype in 
features.items()} ) if features is not None else None ) UpperCamelCase_ = ParquetDatasetReader(a__ , features=a__ , cache_dir=a__ ).read() _check_parquet_dataset(a__ , a__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase__ ( a__ : Any , a__ : int , a__ : Tuple ) -> List[Any]: UpperCamelCase_ = tmp_path / """cache""" UpperCamelCase_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase_ = ParquetDatasetReader(a__ , cache_dir=a__ , split=a__ ).read() _check_parquet_dataset(a__ , a__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase__ ( a__ : List[str] , a__ : List[str] , a__ : Optional[Any] ) -> List[Any]: if issubclass(a__ , a__ ): UpperCamelCase_ = parquet_path elif issubclass(a__ , a__ ): UpperCamelCase_ = [parquet_path] UpperCamelCase_ = tmp_path / """cache""" UpperCamelCase_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase_ = ParquetDatasetReader(a__ , cache_dir=a__ ).read() _check_parquet_dataset(a__ , a__ ) def lowerCamelCase__ ( a__ : str , a__ : Optional[Any] , a__ : int=("train",) ) -> Union[str, Any]: assert isinstance(a__ , a__ ) for split in splits: UpperCamelCase_ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase__ ( a__ : Union[str, Any] , a__ : Any , a__ : Union[str, Any] ) -> int: UpperCamelCase_ = tmp_path / """cache""" UpperCamelCase_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): 
UpperCamelCase_ = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=a__ , keep_in_memory=a__ ).read() _check_parquet_datasetdict(a__ , a__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase__ ( a__ : Optional[Any] , a__ : Any , a__ : Dict ) -> Any: UpperCamelCase_ = tmp_path / """cache""" UpperCamelCase_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase_ = features.copy() if features else default_expected_features UpperCamelCase_ = ( Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase_ = ParquetDatasetReader({"""train""": parquet_path} , features=a__ , cache_dir=a__ ).read() _check_parquet_datasetdict(a__ , a__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase__ ( a__ : Optional[int] , a__ : List[Any] , a__ : Tuple ) -> Tuple: if split: UpperCamelCase_ = {split: parquet_path} else: UpperCamelCase_ = """train""" UpperCamelCase_ = {"""train""": parquet_path, """test""": parquet_path} UpperCamelCase_ = tmp_path / """cache""" UpperCamelCase_ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase_ = ParquetDatasetReader(a__ , cache_dir=a__ ).read() _check_parquet_datasetdict(a__ , a__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( a__ : List[str] , a__ : Tuple ) -> List[Any]: UpperCamelCase_ = ParquetDatasetWriter(a__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 UpperCamelCase_ = 
pq.ParquetFile(tmp_path / """foo.parquet""" ) UpperCamelCase_ = pf.read() assert dataset.data.table == output_table def lowerCamelCase__ ( a__ : List[Any] , a__ : int ) -> Optional[int]: UpperCamelCase_ = str(shared_datadir / """test_image_rgb.jpg""" ) UpperCamelCase_ = {"""image""": [image_path]} UpperCamelCase_ = Features({"""image""": Image()} ) UpperCamelCase_ = Dataset.from_dict(a__ , features=a__ ) UpperCamelCase_ = ParquetDatasetWriter(a__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 UpperCamelCase_ = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features UpperCamelCase_ = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=a__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase__ ( a__ : str , a__ : Tuple ) -> List[str]: assert get_writer_batch_size(a__ ) == expected
354
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): A__ : Optional[Any] = CpmAntTokenizer A__ : Tuple = False def lowerCamelCase_ ( self ): """simple docstring""" super().setUp() UpperCamelCase_ = [ """<d>""", """</d>""", """<s>""", """</s>""", """</_>""", """<unk>""", """<pad>""", """</n>""", """我""", """是""", """C""", """P""", """M""", """A""", """n""", """t""", ] UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) @tooslow def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" ) UpperCamelCase_ = """今天天气真好!""" UpperCamelCase_ = ["""今天""", """天气""", """真""", """好""", """!"""] UpperCamelCase_ = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) UpperCamelCase_ = """今天天气真好!""" UpperCamelCase_ = [tokenizer.bos_token] + tokens UpperCamelCase_ = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) UpperCamelCase_ = tokenizer.decode(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase )
261
0
"""simple docstring""" import torch from diffusers import StableDiffusionPipeline __A = "path-to-your-trained-model" __A = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda") __A = "A photo of sks dog in a bucket" __A = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0] image.save("dog-bucket.png")
177
"""simple docstring""" import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase__ ( lowercase__ : int , lowercase__ : List[str] , lowercase__ : List[str] ): # Initialise PyTorch model snake_case : Optional[Any] = TaConfig.from_json_file(lowercase__ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case : Tuple = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowercase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
148
0
'''simple docstring''' import logging from transformers.configuration_utils import PretrainedConfig __lowerCamelCase = logging.getLogger(__name__) class A__ ( _a ): lowercase = """masked_bert""" def __init__( self , UpperCamelCase__=30522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-1_2 , UpperCamelCase__=0 , UpperCamelCase__="topK" , UpperCamelCase__="constant" , UpperCamelCase__=0.0 , **UpperCamelCase__ , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = hidden_act A_ = intermediate_size A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = type_vocab_size A_ = initializer_range A_ = layer_norm_eps A_ = pruning_method A_ = mask_init A_ = mask_scale
371
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class A__ ( unittest.TestCase ): def snake_case_ ( self ) -> Any: '''simple docstring''' A_ = tempfile.mkdtemp() # fmt: off A_ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on A_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) A_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] A_ = {"""unk_token""": """<unk>"""} A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase__ ) ) A_ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } A_ = os.path.join(self.tmpdirname , UpperCamelCase__ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(UpperCamelCase__ , UpperCamelCase__ ) def snake_case_ ( self , **UpperCamelCase__ ) -> str: '''simple docstring''' return 
CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def snake_case_ ( self , **UpperCamelCase__ ) -> Any: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def snake_case_ ( self ) -> List[Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def snake_case_ ( self ) -> Any: '''simple docstring''' A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self ) -> Optional[int]: '''simple docstring''' A_ = self.get_tokenizer() A_ = self.get_rust_tokenizer() A_ = self.get_image_processor() A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) A_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ ) A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) A_ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ ) self.assertIsInstance(processor_fast.image_processor , 
UpperCamelCase__ ) def snake_case_ ( self ) -> str: '''simple docstring''' A_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A_ = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 ) A_ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) def snake_case_ ( self ) -> str: '''simple docstring''' A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCamelCase__ , return_tensors="""np""" ) A_ = processor(images=UpperCamelCase__ , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def snake_case_ ( self ) -> Optional[int]: '''simple docstring''' A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) A_ = """lower newer""" A_ = processor(text=UpperCamelCase__ ) A_ = tokenizer(UpperCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) A_ = """lower newer""" A_ = self.prepare_image_inputs() A_ 
= processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def snake_case_ ( self ) -> Union[str, Any]: '''simple docstring''' A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = processor.batch_decode(UpperCamelCase__ ) A_ = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def snake_case_ ( self ) -> Optional[int]: '''simple docstring''' A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) A_ = """lower newer""" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
101
0
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def A (__A : int ) -> List[Any]: # picklable for multiprocessing """simple docstring""" return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def A () -> Dict: """simple docstring""" with parallel_backend('''spark''' ): assert ParallelBackendConfig.backend_name == "spark" UpperCAmelCase_ = [1, 2, 3] with pytest.raises(lowerCamelCase__ ): with parallel_backend('''unsupported backend''' ): map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=2 ) with pytest.raises(lowerCamelCase__ ): with parallel_backend('''unsupported backend''' ): map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize('''num_proc''' , [2, -1] ) def A (__A : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ = [1, 2] UpperCAmelCase_ = {"""a""": 1, """b""": 2} UpperCAmelCase_ = {"""a""": [1, 2], """b""": [3, 4]} UpperCAmelCase_ = {"""a""": {"""1""": 1}, """b""": 2} UpperCAmelCase_ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} UpperCAmelCase_ = [2, 3] UpperCAmelCase_ = {"""a""": 2, """b""": 3} UpperCAmelCase_ = {"""a""": [2, 3], """b""": [4, 5]} UpperCAmelCase_ = {"""a""": {"""1""": 2}, """b""": 3} UpperCAmelCase_ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} with parallel_backend('''spark''' ): assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa assert 
map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
51
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase : def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ): A_ : str = parent A_ : int = batch_size A_ : Dict = seq_length A_ : Any = is_training A_ : List[str] = use_input_mask A_ : Any = use_token_type_ids A_ : int = use_labels A_ : str = vocab_size A_ : Any = hidden_size A_ : Optional[Any] = num_hidden_layers A_ : Dict = num_attention_heads A_ : Dict = intermediate_size A_ : Optional[int] = hidden_act A_ : int = hidden_dropout_prob A_ : Optional[int] = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : List[Any] = type_vocab_size A_ : Any = type_sequence_label_size A_ : Tuple = initializer_range A_ : int = num_labels A_ : Optional[int] = num_choices A_ : Optional[int] = scope def _a (self ): A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : List[str] = None if self.use_input_mask: A_ : Any = random_attention_mask([self.batch_size, self.seq_length] ) A_ : int = None if self.use_token_type_ids: 
A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Tuple = None A_ : List[str] = None A_ : List[str] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : str = ids_tensor([self.batch_size] , self.num_choices ) A_ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a (self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): A_ : List[Any] = BioGptModel(config=lowercase ) model.to(lowercase ) model.eval() A_ : List[Any] = model(lowercase , attention_mask=lowercase ) A_ : str = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ): A_ : List[Any] = BioGptForCausalLM(config=lowercase ) model.to(lowercase ) model.eval() A_ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ): A_ : Optional[Any] = BioGptModel(config=lowercase ) 
model.to(lowercase ) model.eval() # create attention mask A_ : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase ) A_ : Optional[Any] = self.seq_length // 2 A_ : List[Any] = 0 # first forward pass A_, A_ : List[str] = model(lowercase , attention_mask=lowercase ).to_tuple() # create hypothetical next token and extent to next_input_ids A_ : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids A_ : Union[str, Any] = ids_tensor((1,) , lowercase ).item() + 1 A_ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) A_ : Optional[int] = random_other_next_tokens # append to next input_ids and attn_mask A_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) A_ : Any = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase )] , dim=1 , ) # get two different outputs A_ : List[Any] = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""] A_ : Optional[int] = model(lowercase , past_key_values=lowercase , attention_mask=lowercase )["""last_hidden_state"""] # select random slice A_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() A_ : int = output_from_no_past[:, -1, random_slice_idx].detach() A_ : List[Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ): A_ : Optional[int] = BioGptModel(config=lowercase ).to(lowercase ).eval() A_ : Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase ) # first forward pass A_ : int = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) A_, A_ : int = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids A_ : Optional[int] = ids_tensor((self.batch_size, 3) , 
config.vocab_size ) A_ : str = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and A_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) A_ : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) A_ : List[str] = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""] A_ : Tuple = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[ """last_hidden_state""" ] # select random slice A_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() A_ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach() A_ : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ): A_ : Union[str, Any] = BioGptForCausalLM(lowercase ) model.to(lowercase ) if gradient_checkpointing: model.gradient_checkpointing_enable() A_ : Dict = model(lowercase , labels=lowercase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def _a (self , lowercase , *lowercase ): A_ : Union[str, Any] = BioGptModel(lowercase ) A_ : str = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ): A_ : Union[str, Any] = self.num_labels A_ : Optional[int] = BioGptForTokenClassification(lowercase ) model.to(lowercase ) model.eval() A_ : Dict = 
model(lowercase , attention_mask=lowercase , token_type_ids=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a (self ): A_ : List[Any] = self.prepare_config_and_inputs() ( ( A_ ), ( A_ ), ( A_ ), ( A_ ), ( A_ ), ( A_ ), ( A_ ), ) : Dict = config_and_inputs A_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : str = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : int = (BioGptForCausalLM,) if is_torch_available() else () __SCREAMING_SNAKE_CASE : Optional[Any] = ( { 'feature-extraction': BioGptModel, 'text-classification': BioGptForSequenceClassification, 'text-generation': BioGptForCausalLM, 'token-classification': BioGptForTokenClassification, 'zero-shot': BioGptForSequenceClassification, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : List[str] = False def _a (self ): A_ : Tuple = BioGptModelTester(self ) A_ : List[Any] = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def _a (self ): self.config_tester.run_common_tests() def _a (self ): A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def _a (self ): A_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ : int = type self.model_tester.create_and_check_model(*lowercase ) def _a (self ): A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase ) def _a (self ): A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*lowercase , 
gradient_checkpointing=lowercase ) def _a (self ): A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase ) def _a (self ): A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase ) def _a (self ): A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase ) @slow def _a (self ): A_ : str = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(lowercase ) A_ : List[str] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) A_ : List[str] = """left""" # Define PAD Token = EOS Token = 50256 A_ : Any = tokenizer.eos_token A_ : Dict = model.config.eos_token_id # use different length sentences to test batching A_ : List[Any] = [ """Hello, my dog is a little""", """Today, I""", ] A_ : List[str] = tokenizer(lowercase , return_tensors="""pt""" , padding=lowercase ) A_ : List[str] = inputs["""input_ids"""].to(lowercase ) A_ : List[Any] = model.generate( input_ids=lowercase , attention_mask=inputs["""attention_mask"""].to(lowercase ) , ) A_ : List[str] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(lowercase ) A_ : List[Any] = model.generate(input_ids=lowercase ) A_ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() A_ : Union[str, Any] = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(lowercase ) A_ : Any = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) A_ : List[str] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) A_ : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) A_ : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) A_ : Union[str, Any] = [ """Hello, my dog is a little bit bigger than a little bit.""", 
"""Today, I have a good idea of how to use the information""", ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) @slow def _a (self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : int = BioGptModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def _a (self ): A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[Any] = 3 A_ : Union[str, Any] = input_dict["""input_ids"""] A_ : List[Any] = input_ids.ne(1 ).to(lowercase ) A_ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A_ : Union[str, Any] = BioGptForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() A_ : Tuple = model(lowercase , attention_mask=lowercase , labels=lowercase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a (self ): A_, A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = 3 A_ : Dict = """multi_label_classification""" A_ : List[Any] = input_dict["""input_ids"""] A_ : Tuple = input_ids.ne(1 ).to(lowercase ) A_ : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) A_ : Dict = BioGptForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() A_ : int = model(lowercase , attention_mask=lowercase , labels=lowercase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): @slow def _a (self ): A_ : Dict = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) A_ : Optional[Any] = torch.tensor([[2, 4805, 9, 656, 21]] ) A_ : Dict = model(lowercase )[0] A_ : Any = 42384 A_ : Union[str, Any] = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , lowercase ) A_ : Union[str, Any] = torch.tensor( [[[-9.52_36, 
-9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) ) @slow def _a (self ): A_ : Union[str, Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) A_ : Dict = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(lowercase ) torch.manual_seed(0 ) A_ : Dict = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(lowercase ) A_ : Optional[int] = model.generate( **lowercase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=lowercase , ) A_ : Union[str, Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase ) A_ : Any = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(lowercase , lowercase )
206
0
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowerCAmelCase__ : Optional[Any] =getLogger(__name__) lowerCAmelCase__ : Tuple ='cuda' if torch.cuda.is_available() else 'cpu' def a__ ( A__, A__, A__, A__ = 8, A__ = DEFAULT_DEVICE, A__=False, A__="summarization", A__=None, **A__, ): SCREAMING_SNAKE_CASE_ : List[Any] = Path(A__ ).open('w', encoding='utf-8' ) SCREAMING_SNAKE_CASE_ : Tuple = str(A__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ ) if fpaa: SCREAMING_SNAKE_CASE_ : Optional[Any] = model.half() SCREAMING_SNAKE_CASE_ : Any = AutoTokenizer.from_pretrained(A__ ) logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. 
SCREAMING_SNAKE_CASE_ : int = time.time() # update config with task specific params use_task_specific_params(A__, A__ ) if prefix is None: SCREAMING_SNAKE_CASE_ : Tuple = prefix or getattr(model.config, 'prefix', '' ) or '' for examples_chunk in tqdm(list(chunks(A__, A__ ) ) ): SCREAMING_SNAKE_CASE_ : List[Any] = [prefix + text for text in examples_chunk] SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(A__, return_tensors='pt', truncation=A__, padding='longest' ).to(A__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate( input_ids=batch.input_ids, attention_mask=batch.attention_mask, **A__, ) SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.batch_decode(A__, skip_special_tokens=A__, clean_up_tokenization_spaces=A__ ) for hypothesis in dec: fout.write(hypothesis + '\n' ) fout.flush() fout.close() SCREAMING_SNAKE_CASE_ : Dict = int(time.time() - start_time ) # seconds SCREAMING_SNAKE_CASE_ : List[Any] = len(A__ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4 )} def a__ ( ): return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' ) def a__ ( A__=True ): SCREAMING_SNAKE_CASE_ : Any = argparse.ArgumentParser() parser.add_argument('model_name', type=A__, help='like facebook/bart-large-cnn,t5-base, etc.' ) parser.add_argument('input_path', type=A__, help='like cnn_dm/test.source' ) parser.add_argument('save_path', type=A__, help='where to save summaries' ) parser.add_argument('--reference_path', type=A__, required=A__, help='like cnn_dm/test.target' ) parser.add_argument('--score_path', type=A__, required=A__, default='metrics.json', help='where to save metrics' ) parser.add_argument('--device', type=A__, required=A__, default=A__, help='cuda, cuda:1, cpu etc.' 
) parser.add_argument( '--prefix', type=A__, required=A__, default=A__, help='will be added to the begininng of src examples' ) parser.add_argument('--task', type=A__, default='summarization', help='used for task_specific_params + metrics' ) parser.add_argument('--bs', type=A__, default=8, required=A__, help='batch size' ) parser.add_argument( '--n_obs', type=A__, default=-1, required=A__, help='How many observations. Defaults to all.' ) parser.add_argument('--fp16', action='store_true' ) parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results' ) parser.add_argument( '--info', nargs='?', type=A__, const=datetime_now(), help=( 'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.' ' lang=en-ru. If no value is passed, the current datetime string will be used.' ), ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_known_args() SCREAMING_SNAKE_CASE_ : str = parse_numeric_n_bool_cl_kwargs(A__ ) if parsed_args and verbose: print(F'''parsed the following generate kwargs: {parsed_args}''' ) SCREAMING_SNAKE_CASE_ : str = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: SCREAMING_SNAKE_CASE_ : Optional[Any] = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=A__ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('Can\'t mix --fp16 and --device cpu' ) SCREAMING_SNAKE_CASE_ : Optional[int] = generate_summaries_or_translations( A__, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fpaa=args.fpaa, 
task=args.task, prefix=args.prefix, **A__, ) if args.reference_path is None: return {} # Compute scores SCREAMING_SNAKE_CASE_ : Any = calculate_bleu if 'translation' in args.task else calculate_rouge SCREAMING_SNAKE_CASE_ : Optional[int] = [x.rstrip() for x in open(args.save_path ).readlines()] SCREAMING_SNAKE_CASE_ : List[str] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )] SCREAMING_SNAKE_CASE_ : dict = score_fn(A__, A__ ) scores.update(A__ ) if args.dump_args: scores.update(A__ ) if args.info: SCREAMING_SNAKE_CASE_ : str = args.info if verbose: print(A__ ) if args.score_path is not None: json.dump(A__, open(args.score_path, 'w' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
162
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) lowerCAmelCase__ : Optional[Any] =logging.getLogger(__name__) def a__ ( A__, A__ ): SCREAMING_SNAKE_CASE_ : Any = np.argmax(A__, axis=1 ) return np.sum(outputs == labels ) def a__ ( A__ ): with open(A__, encoding='utf_8' ) as f: SCREAMING_SNAKE_CASE_ : int = csv.reader(A__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] next(A__ ) # skip the first line for line in tqdm(A__ ): output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def a__ ( A__, A__, A__, A__, A__, A__ ): SCREAMING_SNAKE_CASE_ : str = [] for dataset in encoded_datasets: SCREAMING_SNAKE_CASE_ : str = len(A__ ) SCREAMING_SNAKE_CASE_ : List[Any] = np.zeros((n_batch, 2, input_len), dtype=np.intaa ) SCREAMING_SNAKE_CASE_ : str = np.zeros((n_batch, 2), dtype=np.intaa ) SCREAMING_SNAKE_CASE_ : Optional[int] = np.full((n_batch, 2, input_len), fill_value=-1_0_0, dtype=np.intaa ) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros((n_batch,), dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(A__ ): SCREAMING_SNAKE_CASE_ : List[str] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] SCREAMING_SNAKE_CASE_ : Optional[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] SCREAMING_SNAKE_CASE_ : Any = with_conta SCREAMING_SNAKE_CASE_ : Union[str, Any] = with_conta SCREAMING_SNAKE_CASE_ : Dict = len(A__ ) - 1 SCREAMING_SNAKE_CASE_ : str = len(A__ ) - 1 SCREAMING_SNAKE_CASE_ : Any = with_conta 
SCREAMING_SNAKE_CASE_ : str = with_conta SCREAMING_SNAKE_CASE_ : List[str] = mc_label SCREAMING_SNAKE_CASE_ : Any = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(A__ ) for t in all_inputs ) ) return tensor_datasets def a__ ( ): SCREAMING_SNAKE_CASE_ : Any = argparse.ArgumentParser() parser.add_argument('--model_name', type=A__, default='openai-gpt', help='pretrained model name' ) parser.add_argument('--do_train', action='store_true', help='Whether to run training.' ) parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.' ) parser.add_argument( '--output_dir', default=A__, type=A__, required=A__, help='The output directory where the model predictions and checkpoints will be written.', ) parser.add_argument('--train_dataset', type=A__, default='' ) parser.add_argument('--eval_dataset', type=A__, default='' ) parser.add_argument('--seed', type=A__, default=4_2 ) parser.add_argument('--num_train_epochs', type=A__, default=3 ) parser.add_argument('--train_batch_size', type=A__, default=8 ) parser.add_argument('--eval_batch_size', type=A__, default=1_6 ) parser.add_argument('--adam_epsilon', default=1E-8, type=A__, help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm', type=A__, default=1 ) parser.add_argument( '--max_steps', default=-1, type=A__, help=( 'If > 0: set total number of training steps to perform. Override num_train_epochs.' ), ) parser.add_argument( '--gradient_accumulation_steps', type=A__, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', ) parser.add_argument('--learning_rate', type=A__, default=6.25E-5 ) parser.add_argument('--warmup_steps', default=0, type=A__, help='Linear warmup over warmup_steps.' 
) parser.add_argument('--lr_schedule', type=A__, default='warmup_linear' ) parser.add_argument('--weight_decay', type=A__, default=0.01 ) parser.add_argument('--lm_coef', type=A__, default=0.9 ) parser.add_argument('--n_valid', type=A__, default=3_7_4 ) parser.add_argument('--server_ip', type=A__, default='', help='Can be used for distant debugging.' ) parser.add_argument('--server_port', type=A__, default='', help='Can be used for distant debugging.' ) SCREAMING_SNAKE_CASE_ : Any = parser.parse_args() print(A__ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=A__ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) SCREAMING_SNAKE_CASE_ : str = torch.cuda.device_count() logger.info('device: {}, n_gpu {}'.format(A__, A__ ) ) if not args.do_train and not args.do_eval: raise ValueError('At least one of `do_train` or `do_eval` must be True.' 
) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset SCREAMING_SNAKE_CASE_ : List[Any] = ['_start_', '_delimiter_', '_classify_'] SCREAMING_SNAKE_CASE_ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(A__ ) SCREAMING_SNAKE_CASE_ : int = tokenizer.convert_tokens_to_ids(A__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(A__ ) ) model.to(A__ ) # Load and encode the datasets def tokenize_and_encode(A__ ): if isinstance(A__, A__ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(A__ ) ) elif isinstance(A__, A__ ): return obj return [tokenize_and_encode(A__ ) for o in obj] logger.info('Encoding dataset...' ) SCREAMING_SNAKE_CASE_ : int = load_rocstories_dataset(args.train_dataset ) SCREAMING_SNAKE_CASE_ : int = load_rocstories_dataset(args.eval_dataset ) SCREAMING_SNAKE_CASE_ : Optional[Any] = (train_dataset, eval_dataset) SCREAMING_SNAKE_CASE_ : List[str] = tokenize_and_encode(A__ ) # Compute the max input length for the Transformer SCREAMING_SNAKE_CASE_ : Tuple = model.config.n_positions // 2 - 2 SCREAMING_SNAKE_CASE_ : Optional[int] = max( len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) SCREAMING_SNAKE_CASE_ : str = min(A__, model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders SCREAMING_SNAKE_CASE_ : Tuple = pre_process_datasets(A__, A__, A__, *A__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = tensor_datasets[0], tensor_datasets[1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorDataset(*A__ ) SCREAMING_SNAKE_CASE_ : str = RandomSampler(A__ ) 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(A__, sampler=A__, batch_size=args.train_batch_size ) SCREAMING_SNAKE_CASE_ : List[Any] = TensorDataset(*A__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = SequentialSampler(A__ ) SCREAMING_SNAKE_CASE_ : str = DataLoader(A__, sampler=A__, batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: SCREAMING_SNAKE_CASE_ : int = args.max_steps SCREAMING_SNAKE_CASE_ : Any = args.max_steps // (len(A__ ) // args.gradient_accumulation_steps) + 1 else: SCREAMING_SNAKE_CASE_ : List[Any] = len(A__ ) // args.gradient_accumulation_steps * args.num_train_epochs SCREAMING_SNAKE_CASE_ : Optional[Any] = list(model.named_parameters() ) SCREAMING_SNAKE_CASE_ : Optional[int] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] SCREAMING_SNAKE_CASE_ : Optional[Any] = [ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0}, ] SCREAMING_SNAKE_CASE_ : Optional[Any] = AdamW(A__, lr=args.learning_rate, eps=args.adam_epsilon ) SCREAMING_SNAKE_CASE_ : List[Any] = get_linear_schedule_with_warmup( A__, num_warmup_steps=args.warmup_steps, num_training_steps=A__ ) if args.do_train: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ), desc='Epoch' ): SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : str = 0 SCREAMING_SNAKE_CASE_ : List[Any] = tqdm(A__, desc='Training' ) for step, batch in enumerate(A__ ): SCREAMING_SNAKE_CASE_ : List[Any] = tuple(t.to(A__ ) for t in batch ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = batch SCREAMING_SNAKE_CASE_ : Tuple = model(A__, mc_token_ids=A__, lm_labels=A__, mc_labels=A__ ) SCREAMING_SNAKE_CASE_ : str = args.lm_coef * losses[0] + 
losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() SCREAMING_SNAKE_CASE_ : Tuple = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 SCREAMING_SNAKE_CASE_ : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(A__, scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer SCREAMING_SNAKE_CASE_ : List[str] = model.module if hasattr(A__, 'module' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(args.output_dir, A__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(args.output_dir, A__ ) torch.save(model_to_save.state_dict(), A__ ) model_to_save.config.to_json_file(A__ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned SCREAMING_SNAKE_CASE_ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) SCREAMING_SNAKE_CASE_ : int = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(A__ ) if args.do_eval: model.eval() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = 0, 0 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = 0, 0 for batch in tqdm(A__, desc='Evaluating' ): SCREAMING_SNAKE_CASE_ : int = tuple(t.to(A__ ) for t in batch ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = batch with torch.no_grad(): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = model( A__, mc_token_ids=A__, lm_labels=A__, mc_labels=A__ ) SCREAMING_SNAKE_CASE_ : List[Any] = mc_logits.detach().cpu().numpy() SCREAMING_SNAKE_CASE_ : Union[str, Any] = mc_labels.to('cpu' ).numpy() SCREAMING_SNAKE_CASE_ : Dict = accuracy(A__, A__ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy 
nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 SCREAMING_SNAKE_CASE_ : List[str] = eval_loss / nb_eval_steps SCREAMING_SNAKE_CASE_ : List[Any] = eval_accuracy / nb_eval_examples SCREAMING_SNAKE_CASE_ : List[Any] = tr_loss / nb_tr_steps if args.do_train else None SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} SCREAMING_SNAKE_CASE_ : int = os.path.join(args.output_dir, 'eval_results.txt' ) with open(A__, 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s', A__, str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) if __name__ == "__main__": main()
162
1
'''simple docstring''' import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class A ( unittest.TestCase ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=5_6 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=7 , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , _UpperCAmelCase="block_sparse" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=3 , ) -> Optional[Any]: __UpperCamelCase : int = parent __UpperCamelCase : Tuple = batch_size __UpperCamelCase : List[Any] = seq_length __UpperCamelCase : int = is_training __UpperCamelCase : Optional[Any] = use_attention_mask __UpperCamelCase : Tuple = use_token_type_ids __UpperCamelCase : List[Any] = use_labels __UpperCamelCase : Dict = vocab_size __UpperCamelCase : Optional[int] = hidden_size __UpperCamelCase : List[Any] = num_hidden_layers __UpperCamelCase : Dict = num_attention_heads __UpperCamelCase : Tuple = intermediate_size __UpperCamelCase : List[Any] = hidden_act __UpperCamelCase : List[Any] = hidden_dropout_prob __UpperCamelCase : int = attention_probs_dropout_prob __UpperCamelCase : Tuple = max_position_embeddings __UpperCamelCase : Optional[int] = type_vocab_size 
__UpperCamelCase : Optional[int] = type_sequence_label_size __UpperCamelCase : List[str] = initializer_range __UpperCamelCase : str = num_choices __UpperCamelCase : Tuple = rescale_embeddings __UpperCamelCase : Tuple = attention_type __UpperCamelCase : List[str] = use_bias __UpperCamelCase : str = block_size __UpperCamelCase : List[str] = num_random_blocks def a_ (self ) -> Optional[Any]: __UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase : List[str] = None if self.use_attention_mask: __UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase : List[str] = None if self.use_token_type_ids: __UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCamelCase : Any = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def a_ (self ) -> List[Any]: __UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() __UpperCamelCase : List[str] = config_and_inputs __UpperCamelCase : Tuple = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask, } return config, inputs_dict @require_flax class A ( _a , unittest.TestCase ): '''simple docstring''' A = ( ( FlaxBigBirdForCausalLM, 
FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) A = False A = False def a_ (self ) -> List[Any]: __UpperCamelCase : Union[str, Any] = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def a_ (self ) -> Optional[int]: super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def a_ (self ) -> Optional[Any]: super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def a_ (self ) -> Dict: super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def a_ (self ) -> int: super().test_hidden_states_output() @slow def a_ (self ) -> Union[str, Any]: for model_class_name in self.all_model_classes: __UpperCamelCase : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(snake_case_ ) def a_ (self ) -> Tuple: if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def a_ (self ) -> Optional[Any]: __UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase : str = self._prepare_for_class(snake_case_ , snake_case_ ) __UpperCamelCase : Tuple = model_class(snake_case_ ) @jax.jit def model_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ): return model(input_ids=snake_case_ , attention_mask=snake_case_ , **snake_case_ ) with self.subTest("JIT Enabled" ): __UpperCamelCase : Optional[Any] = 
model_jitted(**snake_case_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __UpperCamelCase : Any = model_jitted(**snake_case_ ).to_tuple() self.assertEqual(len(snake_case_ ) , len(snake_case_ ) ) for jitted_output, output in zip(snake_case_ , snake_case_ ): self.assertEqual(jitted_output.shape , output.shape ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1E-5 , _UpperCAmelCase="outputs" , _UpperCAmelCase=None ) -> int: # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
298
"""Bidirectional A* path search on a small 2-D grid.

Restores the identifiers that were mangled into colliding placeholder names
(`HEURISTIC`, `grid`, `delta`, and the three classes), and invokes the
bidirectional search in ``__main__`` (it was constructed but never run).
"""
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    """One grid cell with A* bookkeeping: g/h/f costs and a parent link."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are handled as (y, x) tuples
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance when HEURISTIC == 1, otherwise Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Sorting the open list puts the lowest f_cost node first.
        return self.f_cost < other.f_cost


class AStar:
    """Plain A* over the module-level ``grid``."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # Coordinates arrive as (y, x); Node takes (x, y, goal_x, goal_y, ...).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path start->goal, or [start] if unreachable."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (lowest f_cost first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            for child_node in self.get_successors(current_node):
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Keep whichever duplicate has the cheaper path so far.
                    better_node = self.open_nodes.pop(
                        self.open_nodes.index(child_node)
                    )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return walkable up/left/down/right neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            # Skip cells outside the grid.
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            # Skip obstacles.
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # NOTE(review): pos_y/pos_x order here looks swapped versus
                    # Node's (goal_x, goal_y) parameters; preserved from the
                    # original — it only biases the heuristic, not correctness.
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back to the root; return the path root -> node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    """Two A* searches (start->goal and goal->start) that meet in the middle."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand both frontiers until they pop the same position."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each search aims at the other search's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(
        self, fwd_node: Node, bwd_node: Node
    ) -> list[TPosition]:
        """Join the two half-paths; drop the duplicated meeting cell."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # meeting cell already ends fwd_path
        bwd_path.reverse()
        return fwd_path + bwd_path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bd_a_star = BidirectionalAStar(init, goal)
    bd_path = bd_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
309
0
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def UpperCAmelCase ( a_ ) -> Tuple: """simple docstring""" __A = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = StableDiffusionLatentUpscalePipeline snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } snake_case_ = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case_ = frozenset([] ) snake_case_ = True @property def UpperCamelCase_ ( self : str ): __A = 1 __A = 4 __A = (16, 16) __A = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_lowerCAmelCase ) return image def UpperCamelCase_ ( self : List[str] ): torch.manual_seed(0 ) __A = UNetaDConditionModel( act_fn="gelu" ,attention_head_dim=8 ,norm_num_groups=_lowerCAmelCase ,block_out_channels=[32, 32, 64, 64] 
,time_cond_proj_dim=1_60 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ) ,in_channels=8 ,mid_block_type=_lowerCAmelCase ,only_cross_attention=_lowerCAmelCase ,out_channels=5 ,resnet_time_scale_shift="scale_shift" ,time_embedding_type="fourier" ,timestep_post_act="gelu" ,up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") ,) __A = AutoencoderKL( block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,) __A = EulerDiscreteScheduler(prediction_type="sample" ) __A = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="quick_gelu" ,projection_dim=5_12 ,) __A = CLIPTextModel(_lowerCAmelCase ) __A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) __A = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def UpperCamelCase_ ( self : List[str] ,A : Optional[Any] ,A : str=0 ): if str(_lowerCAmelCase ).startswith("mps" ): __A = torch.manual_seed(_lowerCAmelCase ) else: __A = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) __A = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def UpperCamelCase_ ( self : Union[str, Any] ): __A = "cpu" __A = self.get_dummy_components() __A = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) 
pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __A = self.get_dummy_inputs(_lowerCAmelCase ) __A = pipe(**_lowerCAmelCase ).images __A = image[0, -3:, -3:, -1] self.assertEqual(image.shape ,(1, 2_56, 2_56, 3) ) __A = np.array( [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] ) __A = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCAmelCase ,1E-3 ) def UpperCamelCase_ ( self : Dict ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def UpperCamelCase_ ( self : Optional[Any] ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self : List[Any] ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def UpperCamelCase_ ( self : Dict ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def UpperCamelCase_ ( self : int ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self : List[Any] ): super().test_save_load_local(expected_max_difference=3E-3 ) def UpperCamelCase_ ( self : int ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def UpperCamelCase_ ( self : Optional[int] ): __A = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", ] __A = self.get_dummy_components() __A = self.pipeline_class(**_lowerCAmelCase ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __A = self.get_dummy_inputs(_lowerCAmelCase ) __A = 2 __A = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue __A = 
getattr(_lowerCAmelCase ,scheduler_enum.name ) __A = scheduler_cls.from_config(pipe.scheduler.config ) __A = pipe(**_lowerCAmelCase )[0] outputs.append(_lowerCAmelCase ) assert check_same_shape(_lowerCAmelCase ) @require_torch_gpu @slow class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : Optional[int] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : Tuple ): __A = torch.manual_seed(33 ) __A = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ,torch_dtype=torch.floataa ) pipe.to("cuda" ) __A = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" ,torch_dtype=torch.floataa ) upscaler.to("cuda" ) __A = "a photo of an astronaut high resolution, unreal engine, ultra realistic" __A = pipe(_lowerCAmelCase ,generator=_lowerCAmelCase ,output_type="latent" ).images __A = upscaler( prompt=_lowerCAmelCase ,image=_lowerCAmelCase ,num_inference_steps=20 ,guidance_scale=0 ,generator=_lowerCAmelCase ,output_type="np" ,).images[0] __A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean() ) < 5E-2 def UpperCamelCase_ ( self : Union[str, Any] ): __A = torch.manual_seed(33 ) __A = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" ,torch_dtype=torch.floataa ) upscaler.to("cuda" ) __A = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" __A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) __A = upscaler( prompt=_lowerCAmelCase ,image=_lowerCAmelCase ,num_inference_steps=20 ,guidance_scale=0 ,generator=_lowerCAmelCase ,output_type="np" ,).images[0] __A = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max() ) < 5E-2
354
"""Built-in voltage of a p-n junction (restores a mangled signature).

The original definition had all three parameters named ``a_`` (a duplicate-
argument SyntaxError) and referenced a temperature constant ``T`` that had
been renamed away; the body's own references determine the intended names.
"""
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def UpperCAmelCase(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
) -> float:
    """Return the built-in voltage (volts) of a p-n junction.

    Computes ``V_bi = (kT / q) * ln(N_d * N_a / n_i**2)`` with the Boltzmann
    constant from scipy and the electron-volt conversion from
    ``physical_constants``.

    Args:
        donor_conc: donor concentration (must be positive).
        acceptor_conc: acceptor concentration (must be positive).
        intrinsic_conc: intrinsic carrier concentration (must be positive).

    Raises:
        ValueError: if any concentration is non-positive, or if either doping
            concentration does not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
124
0
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets __UpperCamelCase : Optional[int] = """\ @inproceedings{lin-2004-rouge, title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\", author = \"Lin, Chin-Yew\", booktitle = \"Text Summarization Branches Out\", month = jul, year = \"2004\", address = \"Barcelona, Spain\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W04-1013\", pages = \"74--81\", } """ __UpperCamelCase : List[Any] = """\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ __UpperCamelCase : Any = """ Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring, `\"rougeL\"`: Longest common subsequence based scoring. `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results[\"rouge1\"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results[\"rouge1\"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE( datasets.Metric ): def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def lowerCAmelCase_ ( self: int , UpperCamelCase: Dict , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any]=None , UpperCamelCase: Tuple=True , 
UpperCamelCase: Tuple=False ) -> int: if rouge_types is None: snake_case__ = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] snake_case__ = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase , use_stemmer=UpperCamelCase ) if use_aggregator: snake_case__ = scoring.BootstrapAggregator() else: snake_case__ = [] for ref, pred in zip(UpperCamelCase , UpperCamelCase ): snake_case__ = scorer.score(UpperCamelCase , UpperCamelCase ) if use_aggregator: aggregator.add_scores(UpperCamelCase ) else: scores.append(UpperCamelCase ) if use_aggregator: snake_case__ = aggregator.aggregate() else: snake_case__ = {} for key in scores[0]: snake_case__ = [score[key] for score in scores] return result
307
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for the GPTSAN-japanese model.

    Restores the mangled original: every ``__init__`` parameter was named
    ``UpperCamelCase`` (a duplicate-argument SyntaxError) and the base class
    was the undefined name ``a_``; the intended parameter names are recovered
    from the body's attribute assignments and ``PretrainedConfig`` is already
    imported at the top of the file.
    """

    # Identifier used by the auto-config machinery.
    model_type = "gptsan-japanese"
    # Output keys to skip when comparing model outputs at inference.
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    # Canonical attribute names expected by the shared configuration API.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        """Store the configuration values and forward token ids to the base.

        Defaults are taken verbatim from the original signature, in order.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth combines switch-transformer and extra layers.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
307
1
"""simple docstring""" def lowercase_ ( _lowerCamelCase: list[int] , _lowerCamelCase: list[int] ) -> tuple[float, float]: '''simple docstring''' if not len(_lowerCamelCase ) == len(_lowerCamelCase ) == 3: raise ValueError("Please enter a valid equation." ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("Both a & b of two equations can't be zero." ) # Extract the coefficients __lowerCamelCase : List[str] = equationa __lowerCamelCase : Any = equationa # Calculate the determinants of the matrices __lowerCamelCase : Dict = aa * ba - aa * ba __lowerCamelCase : str = ca * ba - ca * ba __lowerCamelCase : str = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("Infinite solutions. (Consistent system)" ) else: raise ValueError("No solution. (Inconsistent system)" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: __lowerCamelCase : int = determinant_x / determinant __lowerCamelCase : Dict = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
363
"""simple docstring""" def lowercase_ ( _lowerCamelCase: int = 100 ) -> int: '''simple docstring''' __lowerCamelCase : Optional[Any] = set() __lowerCamelCase : Union[str, Any] = 0 __lowerCamelCase : Optional[Any] = n + 1 # maximum limit for a in range(2 , _lowerCamelCase ): for b in range(2 , _lowerCamelCase ): __lowerCamelCase : Union[str, Any] = a**b # calculates the current power collect_powers.add(_lowerCamelCase ) # adds the result to the set return len(_lowerCamelCase ) if __name__ == "__main__": print('''Number of terms ''', solution(int(str(input()).strip())))
64
0
# Lazy-import shim for the Wav2Vec2Phoneme tokenizer submodule.
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Maps submodule name -> list of public names it exports.
# NOTE(review): assigned to ``__snake_case`` but consumed below as
# ``_import_structure`` — looks like a mangled rename; confirm against the
# original transformers source.
__snake_case = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real import.
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # At runtime, substitute a lazy module so the tokenizer dependencies are
    # only loaded on first attribute access.
    # NOTE(review): the stock pattern assigns this to ``sys.modules[__name__]``;
    # here it lands in a throwaway module-level name — verify.
    __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
348
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class a__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int ) ->Dict: """simple docstring""" return f"gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase__ ) for s in shape] )}.npy" def _lowercase ( self : Any ) ->Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() def _lowercase ( self : str , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : Tuple=(4, 4, 6_4, 6_4) , UpperCAmelCase__ : Optional[int]=False ) ->List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : int = jnp.bfloataa if fpaa else jnp.floataa SCREAMING_SNAKE_CASE : Tuple = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) , dtype=UpperCAmelCase__ ) return image def _lowercase ( self : Tuple , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Tuple="CompVis/stable-diffusion-v1-4" ) ->Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa SCREAMING_SNAKE_CASE : Dict = """bf16""" if fpaa else None SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = FlaxUNetaDConditionModel.from_pretrained( UpperCAmelCase__ , subfolder="""unet""" , dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ ) return model, params def _lowercase ( self : Optional[int] , UpperCAmelCase__ : int=0 , UpperCAmelCase__ : List[str]=(4, 7_7, 7_6_8) , UpperCAmelCase__ : Optional[Any]=False ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : str = jnp.bfloataa if fpaa else jnp.floataa SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) , 
dtype=UpperCAmelCase__ ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]], [1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]], [8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]], [3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]], # fmt: on ] ) def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] ) ->List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_latents(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Dict = self.get_encoder_hidden_states(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : List[str] = model.apply( {"""params""": params} , UpperCAmelCase__ , jnp.array(UpperCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase__ , ).sample assert sample.shape == latents.shape SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : str = jnp.array(UpperCAmelCase__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]], [1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]], [8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]], [3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]], # fmt: on ] ) def 
_lowercase ( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) ->Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : List[str] = self.get_latents(UpperCAmelCase__ , shape=(4, 4, 9_6, 9_6) , fpaa=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_encoder_hidden_states(UpperCAmelCase__ , shape=(4, 7_7, 1_0_2_4) , fpaa=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : str = model.apply( {"""params""": params} , UpperCAmelCase__ , jnp.array(UpperCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase__ , ).sample assert sample.shape == latents.shape SCREAMING_SNAKE_CASE : str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Dict = jnp.array(UpperCAmelCase__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-2 )
245
0
"""Speed benchmark for ``datasets`` ``.map()`` / ``.filter()``.

Restores the mangled original: both benchmark functions had duplicate
``snake_case__`` parameters (a SyntaxError), the result-path constants were
renamed into collisions, and ``benchmark_map_filter`` was undefined at the
``__main__`` call site. The call structure (identity/batched/no-op under each
format, fast-tokenizer, filter) is preserved exactly; the result-dict keys
are reconstructed — NOTE(review): confirm key strings against the original
benchmark before comparing historical results.
"""
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50_0000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(
    RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")
)


@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # noqa: A001 — name kept from original
    """Time one ``dataset.map(**kwargs)`` call (duration via decorator)."""
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # noqa: A001 — name kept from original
    """Time one ``dataset.filter(**kwargs)`` call (duration via decorator)."""
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    """Generate a synthetic dataset, time map/filter variants, dump JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features(
            {"text": datasets.Value("string"), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained(
            "bert-base-cased", use_fast=True
        )

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        # Same no-op map under each output format to isolate formatting cost.
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(
                dataset, function=lambda x: None, batched=True
            )
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(
                dataset, function=lambda x: None, batched=True
            )
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(
                dataset, function=lambda x: None, batched=True
            )
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(
                dataset, function=lambda x: None, batched=True
            )

        times["map fast-tokenizer batched"] = map(
            dataset, function=tokenize, batched=True
        )
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
363
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

_UpperCamelCase = logging.get_logger(__name__)


# was: @add_end_docstrings(_UpperCamelCase) with base class _UpperCamelCase -- both
# referenced the *logger*; the decorator needs PIPELINE_INIT_ARGS and the base must
# be ChunkPipeline so that preprocess/_forward/postprocess are driven chunk-wise.
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowercase(ChunkPipeline):  # class name kept for compatibility with existing imports
    """Automatic mask generation pipeline (Segment Anything style).

    `preprocess` yields batches of point prompts sharing one set of image
    embeddings, `_forward` predicts and filters masks per batch, and
    `postprocess` fuses all batches with non-maximum suppression.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(F"The {self.__class__} is only available in PyTorch.")

        # was: self.check_model_type(__a) -- `__a` is undefined in this body; the
        # intent is to restrict to mask-generation models.
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        """Route caller kwargs to the (preprocess, forward, postprocess) stages.

        Returns the three per-stage dicts expected by the Pipeline machinery.
        """
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args (applied per batch of points in _forward)
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """Generate binary segmentation masks for `image` (or a batch of images)."""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        """Yield one model-input dict per batch of point prompts.

        The image embeddings are computed once (on device) and shared by every
        yielded batch; `is_last` flags the final batch for the chunk pipeline.
        """
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """Predict masks for one batch of prompts and filter them right away.

        Filtering happens here, per batch, in order to avoid CPU/GPU copies of
        ALL the masks at once.
        """
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        """Fuse the per-batch masks with NMS and assemble the final output dict."""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        # carry through any remaining per-batch outputs, grouped by key
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
335
0
"""Project Euler problem 145: count "reversible" numbers below 10**max_power.

A number n is reversible when every digit of n + reverse(n) is odd.
Leading zeros are not allowed in either n or reverse(n): e.g. 36 + 63 = 99 is
reversible, and there are exactly 120 reversible numbers below one thousand.
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count length-`length` reversible numbers whose outer digit pairs are fixed in `digits`.

    Digit pairs are chosen from the outside in; `remaining_length` is how many
    positions are still free and `remainder` carries the partial column sums of
    n + reverse(n) (the carry propagates as the pairs are fixed).
    """
    if remaining_length == 0:
        # All digits chosen: reject a leading/trailing zero, then check that
        # every column sum of n + reverse(n) is odd while propagating carries.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Middle digit of an odd-length number: 2*digit + carry must be odd,
        # which forces the incoming carry itself to be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    # General case: fix the next outer pair (digit1 on the right half, digit2 on
    # the left); their parities must differ once the current carry is accounted for.
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return the number of reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
346
"""Tests for `datasets.utils.patching.patch_submodule`.

Restored from an obfuscated dump in which every test shared the name
`_UpperCamelCase` (pytest collected none of them) and every mock binding was
lost, so the asserts referenced undefined names.
"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    """Patching os.path.join must cover every access path and restore cleanly."""
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    """A builtin present in the module globals can be patched like any attribute."""
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # patching an attribute of a module that isn't imported by the target must be a no-op
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtins are patched even when absent from the module globals,
    # in case they are looked up lazily at call time
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    """The patch object can also be driven manually via start()/stop()."""
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    """Nested patches of sibling attributes must compose in any order."""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    """Patching attributes/modules that don't exist anywhere must be a silent no-op."""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
346
1
"""Causal-LM text generation pipeline (PyTorch and TensorFlow).

Restored from an obfuscated dump: the enum and the pipeline shared one class
name, the enum members were three duplicate attributes, and local bindings were
lost (code read `prefix`, `preprocess_params`, `inputs`, ... which were never
assigned).
"""
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    # The pipeline below reads ReturnType.TENSORS / NEW_TEXT / FULL_TEXT,
    # so the members must carry these names.
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class _lowerCAmelCase(Pipeline):  # name kept for compatibility (text-generation pipeline)
    """Generate text continuations for a prompt with a causal language model."""

    # Prefix prepended for XLNet / Transfo-XL, which need some leading context
    # to generate sensibly from short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Route caller kwargs to the (preprocess, forward, postprocess) stages."""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            # generation length bookkeeping must account for the prompt prefix
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        """Parse arguments and tokenize, adding Transfo-XL's punctuation-space quirk."""
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """Complete the prompt(s) given as inputs; see the pipeline docs for kwargs."""
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        """Tokenize the (prefixed) prompt; optionally truncate it ("hole") to fit the model."""
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                # keep only the rightmost tokens so the generation still fits
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        """Run generation and reshape the output to (batch, num_return_sequences, seq_len)."""
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        """Decode the generated sequences according to `return_type`."""
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
164
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCAmelCase ( a_ ) -> Any: """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> str: super().__init__() A_ : Any = module A_ : Any = nn.Sequential( nn.Linear(module.in_features , _lowerCamelCase , bias=_lowerCamelCase ) , nn.Linear(_lowerCamelCase , module.out_features , bias=_lowerCamelCase ) , ) A_ : Union[str, Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_lowerCamelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def UpperCAmelCase_ ( self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> List[Any]: return self.module(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) + self.adapter(_lowerCamelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" lowerCamelCase = '''bigscience/bloom-1b7''' # Constant values lowerCamelCase = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4 lowerCamelCase = '''Hello my name is''' lowerCamelCase = set() EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. 
I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) lowerCamelCase = 10 def UpperCAmelCase_ ( self ) -> List[str]: # Models and tokenizer A_ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class _lowerCAmelCase ( __A ): """simple docstring""" def UpperCAmelCase_ ( self ) -> Optional[Any]: super().setUp() # Models and tokenizer A_ : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" ) def UpperCAmelCase_ ( self ) -> Optional[int]: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> List[Any]: A_ : str = self.model_abit.config self.assertTrue(hasattr(_lowerCamelCase , """quantization_config""" ) ) A_ : Union[str, Any] = config.to_dict() A_ : Optional[int] = config.to_diff_dict() A_ : Tuple = config.to_json_string() def UpperCAmelCase_ ( self ) -> str: from bitsandbytes.nn import Paramsabit A_ : List[Any] = self.model_fpaa.get_memory_footprint() A_ : Tuple = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) A_ : Union[str, Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def UpperCAmelCase_ ( self ) -> List[str]: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_lowerCamelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : 
List[str] = self.tokenizer(self.input_text , return_tensors="""pt""" ) A_ : int = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS ) def UpperCAmelCase_ ( self ) -> Any: A_ : Dict = BitsAndBytesConfig() A_ : Tuple = True A_ : Any = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_lowerCamelCase , device_map="""auto""" ) A_ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" ) A_ : Optional[Any] = model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS ) def UpperCAmelCase_ ( self ) -> List[Any]: with self.assertRaises(_lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : Union[str, Any] = BitsAndBytesConfig() with self.assertRaises(_lowerCamelCase ): A_ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_lowerCamelCase , load_in_abit=_lowerCamelCase , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def UpperCAmelCase_ ( self ) -> str: with self.assertRaises(_lowerCamelCase ): # Tries with `str` self.model_abit.to("""cpu""" ) with self.assertRaises(_lowerCamelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_lowerCamelCase ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(_lowerCamelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_lowerCamelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything A_ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" ) A_ : 
Tuple = self.model_fpaa.to(torch.floataa ) A_ : int = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error A_ : Any = self.model_fpaa.to("""cpu""" ) # Check this does not throw an error A_ : str = self.model_fpaa.half() # Check this does not throw an error A_ : Any = self.model_fpaa.float() def UpperCAmelCase_ ( self ) -> Dict: A_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=_lowerCamelCase , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @classmethod def UpperCAmelCase_ ( cls ) -> Optional[int]: A_ : Optional[int] = """t5-small""" A_ : List[str] = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense A_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name ) A_ : Optional[Any] = """Translate in German: Hello, my dog is cute""" def UpperCAmelCase_ ( self ) -> Optional[Any]: gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> Union[str, Any]: from transformers import TaForConditionalGeneration A_ : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules A_ : Any = None # test with `t5-small` A_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" ) A_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A_ : Optional[int] = model.generate(**_lowerCamelCase ) # test with `flan-t5-small` A_ : Tuple = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" ) A_ : Dict = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A_ : str = model.generate(**_lowerCamelCase ) A_ : Optional[int] = modules 
def UpperCAmelCase_ ( self ) -> List[Any]: import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` A_ : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) A_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A_ : List[Any] = model.generate(**_lowerCamelCase ) # test with `flan-t5-small` A_ : Union[str, Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" ) A_ : int = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) A_ : Optional[int] = model.generate(**_lowerCamelCase ) class _lowerCAmelCase ( __A ): """simple docstring""" def UpperCAmelCase_ ( self ) -> int: super().setUp() # model_name A_ : Dict = """bigscience/bloom-560m""" A_ : Union[str, Any] = """t5-small""" # Different types of model A_ : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" ) # Sequence classification model A_ : Dict = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" ) # CausalLM model A_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" ) # Seq2seq model A_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_lowerCamelCase , device_map="""auto""" ) def UpperCAmelCase_ ( self ) -> Optional[Any]: del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> List[Any]: from bitsandbytes.nn import Paramsabit 
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class _lowerCAmelCase ( __A ): """simple docstring""" def UpperCAmelCase_ ( self ) -> str: super().setUp() def UpperCAmelCase_ ( self ) -> Any: del self.pipe gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : List[str] = pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass A_ : int = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class _lowerCAmelCase ( __A ): """simple docstring""" def UpperCAmelCase_ ( self ) -> str: super().setUp() def UpperCAmelCase_ ( self ) -> str: A_ : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_lowerCamelCase , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model A_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch A_ : int = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS ) class _lowerCAmelCase ( __A ): """simple docstring""" def UpperCAmelCase_ ( self ) -> Tuple: A_ : Union[str, Any] = """facebook/opt-350m""" super().setUp() def UpperCAmelCase_ ( self ) -> Optional[Any]: if 
version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters A_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): A_ : Optional[Any] = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability A_ : Any = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_lowerCamelCase ) ): A_ : int = LoRALayer(module.q_proj , rank=16 ) A_ : Optional[int] = LoRALayer(module.k_proj , rank=16 ) A_ : Union[str, Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch A_ : Dict = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): A_ : Dict = model.forward(**_lowerCamelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(_lowerCamelCase , _lowerCamelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_lowerCamelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = '''gpt2-xl''' lowerCamelCase = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
164
1
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ): snake_case__ : List[Any] = parent snake_case__ : List[Any] = batch_size snake_case__ : int = image_size snake_case__ : List[Any] = num_channels snake_case__ : Optional[Any] = embeddings_size snake_case__ : Optional[int] = hidden_sizes snake_case__ : Tuple = depths snake_case__ : Any = is_training snake_case__ : Optional[int] = use_labels snake_case__ : Optional[int] = hidden_act snake_case__ : Optional[int] = num_labels snake_case__ : int = scope snake_case__ : Tuple = len(snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , 
self.num_labels ) snake_case__ : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : int ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ): snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ ) snake_case__ : int = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ): snake_case__ : str = self.num_labels snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ ) snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self : Tuple ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a , _a , unittest.TestCase ): """simple docstring""" lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowercase = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def lowerCamelCase ( self : Optional[int] ): snake_case__ : Tuple = TFResNetModelTester(self ) snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , 
has_text_modality=snake_case_ ) def lowerCamelCase ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : str ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase ( self : int ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase ( self : List[Any] ): pass def lowerCamelCase ( self : List[Any] ): snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = model_class(snake_case_ ) snake_case__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCamelCase ( self : List[str] ): def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ): snake_case__ : List[Any] = model_class(snake_case_ ) snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of 
shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[Any] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case__ : Dict = layer_type snake_case__ : Optional[int] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[Any] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def lowerCamelCase ( self : Optional[Any] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def __snake_case( ) -> Optional[int]: snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase ( self : List[Any] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case__ : List[Any] = self.default_image_processor snake_case__ : List[Any] = prepare_img() snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" ) # forward pass snake_case__ : Optional[Any] = 
model(**snake_case_ ) # verify the logits snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
35
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def _lowerCamelCase( a ): return getitem, k def _lowerCamelCase( a , a ): return setitem, k, v def _lowerCamelCase( a ): return delitem, k def _lowerCamelCase( a , a , *a ): try: return fun(a , *a ), None except Exception as e: return None, e SCREAMING_SNAKE_CASE__:List[Any] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] SCREAMING_SNAKE_CASE__:List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] SCREAMING_SNAKE_CASE__:Any = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] SCREAMING_SNAKE_CASE__:int = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] SCREAMING_SNAKE_CASE__:Any = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def _lowerCamelCase( a ): __a = HashMap(initial_block_size=4 ) __a = {} for _, (fun, *args) in enumerate(a ): __a , __a = _run_operation(a , a , *a ) __a , __a = _run_operation(a , a , *a ) assert my_res == py_res assert str(a ) == str(a ) assert set(a ) == set(a ) assert len(a ) == len(a ) assert set(my.items() ) == set(py.items() ) def _lowerCamelCase( ): def is_public(a ) -> bool: return not name.startswith("_" ) __a = {name for 
name in dir({} ) if is_public(a )} __a = {name for name in dir(HashMap() ) if is_public(a )} assert dict_public_names > hash_public_names
261
0
'''simple docstring''' from __future__ import annotations A__ : Tuple = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0] A__ : str = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1] def a_ ( _UpperCAmelCase : list[float] ) -> list[float]: __snake_case : Optional[Any] = [] __snake_case : Optional[int] = len(_UpperCAmelCase ) for i in range(_UpperCAmelCase ): __snake_case : float = -1 for j in range(i + 1 ,_UpperCAmelCase ): if arr[i] < arr[j]: __snake_case : Dict = arr[j] break result.append(_UpperCAmelCase ) return result def a_ ( _UpperCAmelCase : list[float] ) -> list[float]: __snake_case : Tuple = [] for i, outer in enumerate(_UpperCAmelCase ): __snake_case : float = -1 for inner in arr[i + 1 :]: if outer < inner: __snake_case : Union[str, Any] = inner break result.append(_UpperCAmelCase ) return result def a_ ( _UpperCAmelCase : list[float] ) -> list[float]: __snake_case : List[str] = len(_UpperCAmelCase ) __snake_case : list[float] = [] __snake_case : list[float] = [-1] * arr_size for index in reversed(range(_UpperCAmelCase ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: __snake_case : Any = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) A__ : Tuple = ( '''from __main__ import arr, next_greatest_element_slow, ''' '''next_greatest_element_fast, next_greatest_element''' ) print( '''next_greatest_element_slow():''', timeit('''next_greatest_element_slow(arr)''', setup=setup), ) print( '''next_greatest_element_fast():''', timeit('''next_greatest_element_fast(arr)''', setup=setup), ) print( ''' next_greatest_element():''', timeit('''next_greatest_element(arr)''', setup=setup), )
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class snake_case__ ( unittest.TestCase ): def A_ ( self : int ) -> List[Any]: '''simple docstring''' __snake_case : Any = tempfile.mkdtemp() # fmt: off __snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) __snake_case : List[str] = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } __snake_case : Optional[Any] = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__a , __a ) def A_ ( self : Optional[int] , **__a : Dict ) -> int: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : int , **__a : Dict ) -> Tuple: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __snake_case : List[str] = 
[Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : Dict = self.get_image_processor() __snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) processor.save_pretrained(self.tmpdirname ) __snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : Optional[Any] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 ) __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = 
self.prepare_image_inputs() __snake_case : List[str] = image_processor(__a , return_tensors='np' ) __snake_case : List[str] = processor(images=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Optional[int] = 'lower newer' __snake_case : Dict = processor(text=__a ) __snake_case : List[Any] = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : List[Any] = 'lower newer' __snake_case : Optional[Any] = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(__a ): processor() def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Union[str, Any] = self.get_image_processor() __snake_case : Any = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __snake_case : int = processor.batch_decode(__a ) __snake_case : Optional[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def A_ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = 
self.get_image_processor() __snake_case : Dict = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Union[str, Any] = 'lower newer' __snake_case : Tuple = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
0
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer a__: Dict = logging.get_logger(__name__) a__: str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a__: Any = { 'vocab_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-german-cased': ( 'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json' ), 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json' ), }, } a__: Any = { 'distilbert-base-uncased': 512, 'distilbert-base-uncased-distilled-squad': 512, 'distilbert-base-cased': 512, 
'distilbert-base-cased-distilled-squad': 512, 'distilbert-base-german-cased': 512, 'distilbert-base-multilingual-cased': 512, } a__: Optional[Any] = { 'distilbert-base-uncased': {'do_lower_case': True}, 'distilbert-base-uncased-distilled-squad': {'do_lower_case': True}, 'distilbert-base-cased': {'do_lower_case': False}, 'distilbert-base-cased-distilled-squad': {'do_lower_case': False}, 'distilbert-base-german-cased': {'do_lower_case': False}, 'distilbert-base-multilingual-cased': {'do_lower_case': False}, } class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask'''] __SCREAMING_SNAKE_CASE = DistilBertTokenizer def __init__( self,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=True,__lowerCamelCase="[UNK]",__lowerCamelCase="[SEP]",__lowerCamelCase="[PAD]",__lowerCamelCase="[CLS]",__lowerCamelCase="[MASK]",__lowerCamelCase=True,__lowerCamelCase=None,**__lowerCamelCase,): super().__init__( __lowerCamelCase,tokenizer_file=__lowerCamelCase,do_lower_case=__lowerCamelCase,unk_token=__lowerCamelCase,sep_token=__lowerCamelCase,pad_token=__lowerCamelCase,cls_token=__lowerCamelCase,mask_token=__lowerCamelCase,tokenize_chinese_chars=__lowerCamelCase,strip_accents=__lowerCamelCase,**__lowerCamelCase,) A__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''',__lowerCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''',__lowerCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''',__lowerCamelCase ) != tokenize_chinese_chars ): A__ = getattr(__lowerCamelCase,normalizer_state.pop('''type''' ) ) A__ = do_lower_case A__ = strip_accents A__ = tokenize_chinese_chars A__ = normalizer_class(**__lowerCamelCase ) A__ 
= do_lower_case def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=None ): A__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ): A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ): A__ = self._tokenizer.model.save(__lowerCamelCase,name=__lowerCamelCase ) return tuple(__lowerCamelCase )
193
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


# Unit tests for DDIMParallelScheduler (parallel-sampling variant of DDIM).
# NOTE(review): machine-obfuscation damage — every test method is named
# `UpperCamelCase` (later defs shadow earlier ones, so only the last would run),
# assignment targets were renamed to `A__` while their later uses keep the real
# names (`config`, `scheduler_class`, `scheduler`, `model`, `sample`, `residual`,
# `result_sum`, `result_mean`, ...), leaving those names unbound. The original
# upstream identifiers should be restored before running this suite.
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
    __SCREAMING_SNAKE_CASE = (DDIMParallelScheduler,)
    __SCREAMING_SNAKE_CASE = (('''eta''', 0.0), ('''num_inference_steps''', 50))

    # Returns a default scheduler config, updated with any overrides.
    def UpperCamelCase ( self,**__lowerCamelCase ):
        A__ = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''clip_sample''': True,
        }
        config.update(**__lowerCamelCase )
        return config

    # Runs a full 10-step denoising loop and returns the final sample.
    def UpperCamelCase ( self,**__lowerCamelCase ):
        A__ = self.scheduler_classes[0]
        A__ = self.get_scheduler_config(**__lowerCamelCase )
        A__ = scheduler_class(**__lowerCamelCase )
        A__ , A__ = 10, 0.0
        A__ = self.dummy_model()
        A__ = self.dummy_sample_deter
        scheduler.set_timesteps(__lowerCamelCase )
        for t in scheduler.timesteps:
            A__ = model(__lowerCamelCase,__lowerCamelCase )
            A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
        return sample

    def UpperCamelCase ( self ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=__lowerCamelCase )

    def UpperCamelCase ( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=__lowerCamelCase )
        # steps_offset=1 shifts every inference timestep up by one.
        A__ = self.scheduler_classes[0]
        A__ = self.get_scheduler_config(steps_offset=1 )
        A__ = scheduler_class(**__lowerCamelCase )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps,torch.LongTensor([801, 601, 401, 201, 1] ) )

    def UpperCamelCase ( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1],[0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=__lowerCamelCase,beta_end=__lowerCamelCase )

    def UpperCamelCase ( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__lowerCamelCase )

    def UpperCamelCase ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowerCamelCase )

    def UpperCamelCase ( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__lowerCamelCase )

    def UpperCamelCase ( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=__lowerCamelCase )

    def UpperCamelCase ( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=__lowerCamelCase )

    def UpperCamelCase ( self ):
        # Dynamic thresholding across thresholds and prediction types.
        self.check_over_configs(thresholding=__lowerCamelCase )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__lowerCamelCase,prediction_type=__lowerCamelCase,sample_max_value=__lowerCamelCase,)

    def UpperCamelCase ( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=__lowerCamelCase )

    def UpperCamelCase ( self ):
        for t, num_inference_steps in zip([1, 10, 50],[10, 50, 500] ):
            self.check_over_forward(time_step=__lowerCamelCase,num_inference_steps=__lowerCamelCase )

    def UpperCamelCase ( self ):
        for t, eta in zip([1, 10, 49],[0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=__lowerCamelCase,eta=__lowerCamelCase )

    def UpperCamelCase ( self ):
        # Spot-checks the internal variance computation against reference values.
        # NOTE(review): the (0,0) case is asserted twice — likely an upstream copy artifact.
        A__ = self.scheduler_classes[0]
        A__ = self.get_scheduler_config()
        A__ = scheduler_class(**__lowerCamelCase )
        assert torch.sum(torch.abs(scheduler._get_variance(0,0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420,400 ) - 0.14771 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980,960 ) - 0.32460 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0,0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487,486 ) - 0.00979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999,998 ) - 0.02 ) ) < 1E-5

    def UpperCamelCase ( self ):
        # Batched no-noise stepping over three perturbed copies of the sample.
        A__ = self.scheduler_classes[0]
        A__ = self.get_scheduler_config()
        A__ = scheduler_class(**__lowerCamelCase )
        A__ , A__ = 10, 0.0
        scheduler.set_timesteps(__lowerCamelCase )
        A__ = self.dummy_model()
        A__ = self.dummy_sample_deter
        A__ = self.dummy_sample_deter + 0.1
        A__ = self.dummy_sample_deter - 0.1
        A__ = samplea.shape[0]
        A__ = torch.stack([samplea, samplea, samplea],dim=0 )
        A__ = torch.arange(__lowerCamelCase )[0:3, None].repeat(1,__lowerCamelCase )
        A__ = model(samples.flatten(0,1 ),timesteps.flatten(0,1 ) )
        A__ = scheduler.batch_step_no_noise(__lowerCamelCase,timesteps.flatten(0,1 ),samples.flatten(0,1 ),__lowerCamelCase )
        A__ = torch.sum(torch.abs(__lowerCamelCase ) )
        A__ = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1E-2
        assert abs(result_mean.item() - 0.4982 ) < 1E-3

    def UpperCamelCase ( self ):
        A__ = self.full_loop()
        A__ = torch.sum(torch.abs(__lowerCamelCase ) )
        A__ = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_sum.item() - 172.0067 ) < 1E-2
        assert abs(result_mean.item() - 0.223967 ) < 1E-3

    def UpperCamelCase ( self ):
        A__ = self.full_loop(prediction_type='''v_prediction''' )
        A__ = torch.sum(torch.abs(__lowerCamelCase ) )
        A__ = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_sum.item() - 52.5302 ) < 1E-2
        assert abs(result_mean.item() - 0.0684 ) < 1E-3

    def UpperCamelCase ( self ):
        # We specify different beta, so that the first alpha is 0.99
        A__ = self.full_loop(set_alpha_to_one=__lowerCamelCase,beta_start=0.01 )
        A__ = torch.sum(torch.abs(__lowerCamelCase ) )
        A__ = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_sum.item() - 149.8295 ) < 1E-2
        assert abs(result_mean.item() - 0.1951 ) < 1E-3

    def UpperCamelCase ( self ):
        # We specify different beta, so that the first alpha is 0.99
        A__ = self.full_loop(set_alpha_to_one=__lowerCamelCase,beta_start=0.01 )
        A__ = torch.sum(torch.abs(__lowerCamelCase ) )
        A__ = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_sum.item() - 149.0784 ) < 1E-2
        assert abs(result_mean.item() - 0.1941 ) < 1E-3
193
1
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Diagnostic script: dumps interpreter, OS, torch and transformers information
# so environment details can be pasted into bug reports.
import os
import platform
import sys

lowerCamelCase_ = "3"

# Interpreter and operating-system details.
for label, value in (
    ("Python version:", sys.version),
    ("OS platform:", platform.platform()),
    ("OS architecture:", platform.machine()),
):
    print(label, value)

# PyTorch details — optional dependency, so report None when absent.
try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

# transformers details — likewise optional.
try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
361
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:
    # Stand-in for PIL.Image so annotations below resolve without vision deps.
    class __A:
        """simple docstring"""

        @staticmethod
        def UpperCAmelCase_ (*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
            pass


# Hashes an image's raw bytes to a short fingerprint for compact comparisons.
# NOTE(review): not runnable as-is — `hashlib.mda` does not exist (almost certainly a
# garbling of `hashlib.md5`), and the body reads `image`/`m` while the parameter is
# named `__a` and the digest is bound to `UpperCamelCase__`. Restore before use.
def __magic_name__ ( __a : Image ):
    '''simple docstring'''
    UpperCamelCase__ = hashlib.mda(image.tobytes() )
    return m.hexdigest()[:10]


# Converts a mask image into {"hash", "shape"} for readable test expectations.
# NOTE(review): same obfuscation damage — `npimg`/`shape` are read but never bound,
# and this def shares the name `__magic_name__` with the function above (shadows it).
def __magic_name__ ( __a : Image ):
    '''simple docstring'''
    UpperCamelCase__ = np.array(__a )
    UpperCamelCase__ = npimg.shape
    return {"hash": hashimage(__a ), "shape": shape}


# Pipeline tests for SAM-style mask generation.
# NOTE(review): the class name `__A` collides with the PIL stand-in above; methods all
# share the name `UpperCAmelCase_`; loop accumulators (`new_outupt` — note the upstream
# typo — `outputs`, `image_segmenter`) are read but bound as `UpperCamelCase__`.
@is_pipeline_test
@require_vision
@require_torch
class __A( unittest.TestCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    SCREAMING_SNAKE_CASE__ = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )

    # Builds the pipeline under test plus sample inputs.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = MaskGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        pass

    @require_tf
    @unittest.skip("""Image segmentation not implemented in TF""" )
    def UpperCAmelCase_ (self ):
        pass

    # End-to-end mask generation against reference hashes/scores (slow, needs weights).
    @slow
    @require_torch
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
        UpperCamelCase__ = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=2_56 )
        # Shortening by hashing
        UpperCamelCase__ = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) ,
            [
                {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
                {"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021},
                {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
                {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
                {"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
                {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9967},
                {"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993},
                {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9909},
                {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9879},
                {"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9834},
                {"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9716},
                {"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9612},
                {"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9599},
                {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9552},
                {"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9532},
                {"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9516},
                {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9499},
                {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9483},
                {"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9464},
                {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
                {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
                {"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9408},
                {"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9335},
                {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9326},
                {"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9262},
                {"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8999},
                {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8986},
                {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8984},
                {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8873},
                {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8871}
            ] , )
        # fmt: on

    # Same pipeline with pred_iou_thresh=1 — only the highest-confidence masks remain.
    @require_torch
    @slow
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = """facebook/sam-vit-huge"""
        UpperCamelCase__ = pipeline("""mask-generation""" , model=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = image_segmenter(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=2_56 )
        # Shortening by hashing
        UpperCamelCase__ = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) ,
            [
                {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
                {"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0210},
                {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
                {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
                {"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
            ] , )
178
0
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


# NOTE(review): module-level names were obfuscated — both the logger and the archive
# map are bound to the same name `UpperCamelCase_`, so the logger binding is lost.
UpperCamelCase_ = logging.get_logger(__name__)

# Canonical T5 checkpoint -> config URL map.
UpperCamelCase_ = {
    '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
    '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
    '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
    '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
    '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}


# T5 model configuration.
# NOTE(review): obfuscation damage makes this class non-runnable as-is:
#  - the class attributes are all bound to `A__` (only the last survives), and their
#    annotations reference `Dict`/`List`/`Optional` which are never imported;
#  - every `__init__` parameter is named `lowerCamelCase_` (duplicate-argument
#    SyntaxError) while the body reads the real names (`vocab_size`, `d_model`, ...);
#  - the `self.` prefixes on the attribute assignments were stripped
#    (e.g. `UpperCAmelCase_ : Any = vocab_size` was presumably `self.vocab_size = ...`).
class _snake_case ( __snake_case ):
    '''simple docstring'''

    A__ : Dict = "t5"
    A__ : List[str] = ["past_key_values"]
    A__ : Optional[int] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__( self: List[str] ,lowerCamelCase_: Dict=32128 ,lowerCamelCase_: Any=512 ,lowerCamelCase_: Union[str, Any]=64 ,lowerCamelCase_: str=2048 ,lowerCamelCase_: Optional[int]=6 ,lowerCamelCase_: List[str]=None ,lowerCamelCase_: int=8 ,lowerCamelCase_: str=32 ,lowerCamelCase_: Optional[int]=128 ,lowerCamelCase_: Tuple=0.1 ,lowerCamelCase_: List[Any]=1e-6 ,lowerCamelCase_: Dict=1.0 ,lowerCamelCase_: Tuple="relu" ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: str=1 ,**lowerCamelCase_: Tuple ,) -> List[str]:
        UpperCAmelCase_ : Any = vocab_size
        UpperCAmelCase_ : Dict = d_model
        UpperCAmelCase_ : Optional[int] = d_kv
        UpperCAmelCase_ : Dict = d_ff
        UpperCAmelCase_ : Dict = num_layers
        UpperCAmelCase_ : Union[str, Any] = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        UpperCAmelCase_ : Union[str, Any] = num_heads
        UpperCAmelCase_ : List[Any] = relative_attention_num_buckets
        UpperCAmelCase_ : List[Any] = relative_attention_max_distance
        UpperCAmelCase_ : Optional[Any] = dropout_rate
        UpperCAmelCase_ : Any = layer_norm_epsilon
        UpperCAmelCase_ : Optional[Any] = initializer_factor
        UpperCAmelCase_ : int = feed_forward_proj
        UpperCAmelCase_ : Optional[Any] = use_cache
        # feed_forward_proj is either "<act>" or "gated-<act>".
        UpperCAmelCase_ : str = self.feed_forward_proj.split("""-""" )
        UpperCAmelCase_ : int = act_info[-1]
        UpperCAmelCase_ : Tuple = act_info[0] == """gated"""
        if len(lowerCamelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase_ ) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'"""
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            UpperCAmelCase_ : List[str] = """gelu_new"""
        super().__init__(
            pad_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,is_encoder_decoder=lowerCamelCase_ ,**lowerCamelCase_ ,)


# ONNX export configuration for T5 (seq2seq with optional past key/values).
# NOTE(review): both properties are named `A__` (presumably `inputs` and
# `default_onnx_opset` upstream — TODO confirm), and `common_inputs`/the per-branch
# dict targets were obfuscated to `UpperCAmelCase_`.
class _snake_case ( __snake_case ):
    '''simple docstring'''

    @property
    def A__ ( self: str ) -> Mapping[str, Mapping[int, str]]:
        UpperCAmelCase_ : int = {
            """input_ids""": {0: """batch""", 1: """encoder_sequence"""},
            """attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
        }
        if self.use_past:
            # With cached past, the attention mask covers past + current tokens
            # and the decoder consumes one new token at a time.
            UpperCAmelCase_ : Any = """past_encoder_sequence + sequence"""
            UpperCAmelCase_ : Optional[int] = {0: """batch"""}
            UpperCAmelCase_ : List[str] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        else:
            UpperCAmelCase_ : Tuple = {0: """batch""", 1: """decoder_sequence"""}
            UpperCAmelCase_ : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(lowerCamelCase_ ,direction="""inputs""" )
        return common_inputs

    @property
    def A__ ( self: Optional[Any] ) -> int:
        return 13
345
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle *data* in place via random pair swaps and return the same list.

    For each of ``len(data)`` rounds, two indices are drawn uniformly at random
    and their elements swapped.  The input list is mutated; the return value is
    the identical list object (empty lists are returned unchanged).

    NOTE: this is the "naive" variant that draws both indices freely each round,
    not the classic decreasing-range Fisher-Yates, so the permutation
    distribution is only approximately uniform.
    """
    for _ in range(len(data)):
        # Pick two positions (possibly equal) and swap them.
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
345
1
import argparse from collections import defaultdict import yaml _UpperCAmelCase = 'docs/source/en/_toctree.yml' def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Tuple ) -> Any: __lowerCAmelCase : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = [] __lowerCAmelCase : Tuple = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = new_doc_list __lowerCAmelCase : int = [key for key, value in counts.items() if value > 1] __lowerCAmelCase : int = [] for duplicate_key in duplicates: __lowerCAmelCase : Optional[int] = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(SCREAMING_SNAKE_CASE ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' """`docs/source/en/_toctree.yml` with different *Title* values. 
Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) __lowerCAmelCase : Optional[Any] = sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : s["title"].lower() ) # "overview" gets special treatment and is always first if len(SCREAMING_SNAKE_CASE ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(SCREAMING_SNAKE_CASE ) # Sort return overview_doc def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Union[str, Any]=False ) -> int: with open(SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f: __lowerCAmelCase : str = yaml.safe_load(f.read() ) # Get to the API doc __lowerCAmelCase : Any = 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowerCAmelCase : List[str] = content[api_idx]["""sections"""] # Then to the model doc __lowerCAmelCase : List[Any] = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 __lowerCAmelCase : str = api_doc[scheduler_idx]["""sections"""] __lowerCAmelCase : List[Any] = clean_doc_toc(SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = False if new_scheduler_doc != scheduler_doc: __lowerCAmelCase : Optional[Any] = True if overwrite: __lowerCAmelCase : Any = new_scheduler_doc if diff: if overwrite: __lowerCAmelCase : Tuple = api_doc with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(SCREAMING_SNAKE_CASE , allow_unicode=SCREAMING_SNAKE_CASE ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[Any]=False ) -> str: with open(SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f: __lowerCAmelCase : str = yaml.safe_load(f.read() ) # Get to the API doc 
__lowerCAmelCase : Optional[Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowerCAmelCase : List[Any] = content[api_idx]["""sections"""] # Then to the model doc __lowerCAmelCase : List[Any] = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 __lowerCAmelCase : List[Any] = False __lowerCAmelCase : List[str] = api_doc[pipeline_idx]["""sections"""] __lowerCAmelCase : Any = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: __lowerCAmelCase : Optional[int] = pipeline_doc["""section"""] __lowerCAmelCase : Union[str, Any] = clean_doc_toc(SCREAMING_SNAKE_CASE ) if overwrite: __lowerCAmelCase : List[Any] = new_sub_pipeline_doc new_pipeline_docs.append(SCREAMING_SNAKE_CASE ) # sort overall pipeline doc __lowerCAmelCase : List[str] = clean_doc_toc(SCREAMING_SNAKE_CASE ) if new_pipeline_docs != pipeline_docs: __lowerCAmelCase : Any = True if overwrite: __lowerCAmelCase : Tuple = new_pipeline_docs if diff: if overwrite: __lowerCAmelCase : Optional[Any] = api_doc with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(SCREAMING_SNAKE_CASE , allow_unicode=SCREAMING_SNAKE_CASE ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') _UpperCAmelCase = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
355
# Kosaraju's algorithm for strongly connected components of a directed graph
# whose vertices are 0..n-1 (adjacency-list dicts).

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return vertices reachable from *vert* in DFS post-order (finish order).

    Marks vertices in *visited* as a side effect.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    # Append after all descendants: post-order gives decreasing finish times
    # when read back-to-front.
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect one strongly connected component by DFS on the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of *graph* (Kosaraju).

    Components are listed in reverse finish-time order of their roots.
    """
    visited = len(graph) * [False]

    # Build the transpose graph (all edges reversed).
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    # First pass: DFS finish order on the original graph.
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    # Second pass: peel components off the reversed graph in reverse finish order.
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
232
0
# Lazily-loaded public API for the MaskFormer model family.
#
# Fix: the obfuscated original assigned the import-structure pieces to
# throwaway names, so `_import_structure` was undefined at the `_LazyModule`
# call, and the `sys.modules[__name__]` replacement was dropped.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
20
"""Build a DPR knowledge dataset and FAISS index for RAG from a tab-separated csv.

Fix: the obfuscated original collapsed distinct names into one
(`__A , __A = [], []` then used `titles`/`texts`; `a_ , a_ , a_ = ...` then
used `rag_example_args` etc.; the entry point was renamed away from `main`;
dataclass fields lost their names and annotations), so the script raised
NameError everywhere. Names are restored consistently.
"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of at most ``n`` ``character``-separated words."""
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]


def split_documents(documents: dict) -> dict:
    """Split batched documents into passages of 100 words, keeping their titles."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of a batch of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    """Create the passages dataset with DPR embeddings, then index it with FAISS."""
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use to split the documents into passages. Default is single process."},
    )
    batch_size: int = field(
        default=16,
        metadata={"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."},
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
179
0
# Lazily-loaded public API for Pix2Struct.
#
# Fix: the obfuscated original never defined `_import_structure` (NameError at
# the `_LazyModule` call), dropped the `sys.modules[__name__]` replacement, and
# mangled the TYPE_CHECKING imports to non-existent `pixastruct` modules and
# `PixaStruct*` names that contradict the declared export lists.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
354
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    """Donut-based tool answering a natural-language question about a document image.

    Fixes vs. the obfuscated original: the base class (`__A`) and the three
    method names (`_snake_case` x3, shadowing each other) are restored to the
    PipelineTool API (`encode`/`forward`/`decode`), and the typo
    `tokenajson` is corrected to the processor's `token2json`.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Donut needs PIL to turn the incoming document into pixel values.
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        """Build decoder prompt ids and pixel values for the generate call."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy-generate the answer sequence from the encoded inputs."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens / tags and return the parsed answer string."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
9
0
"""Pure-Python SHA-1 implementation, cross-checked against hashlib.

Fix: the obfuscated original named the class `UpperCamelCase_` while the test
and main functions called `SHAaHash`, and method bodies referenced undefined
names (`a_`, `snake_case__`) for their own parameters — every call path raised
NameError. Names are restored consistently.
"""
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    """Compute the SHA-1 digest of a bytes payload (FIPS 180-1), like hashlib.sha1."""

    def __init__(self, data):
        # Raw message bytes plus the five 32-bit initialization constants.
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer ``n`` by ``b`` bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the message to a 64-byte multiple, appending the 64-bit bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand one 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80 compression rounds over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                # NOTE: `+` binds tighter than `&`, so the whole sum is reduced mod 2**32.
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    """Cross-check the pure-Python digest against hashlib."""
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    """Hash a CLI-provided string or file contents and print the digest."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
54
"""Dummy object that raises a helpful error when the optional `note_seq` backend is missing.

Fix: the obfuscated original used `metaclass=__a`, an undefined name — the
metaclass must be the imported `DummyObject`. Class/method names are restored
to the conventional dummy-objects layout (reconstructed — confirm against the
real `note_seq`-backed module).
"""
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    # Backends whose absence this placeholder reports on any use.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
64
0
"""PyTorch MobileNetV1 model.

Fix: the obfuscation renamed all three top-level functions to the SAME
identifier and all four classes to `_lowercase`, so every internal reference
(`_build_tf_to_pytorch_map`, `apply_tf_padding`, `MobileNetV1ConvLayer`,
`MobileNetV1Model`, the docstring constants, the tf->pt map dict keys) was a
NameError; also digit-mangled names (`ACTaFN`, `nn.Convad`) are restored.
"""
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Map TensorFlow checkpoint variable names to the matching PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        # Each TF block maps to a (depthwise, pointwise) pair of PyTorch layers.
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoint weights into a PyTorch MobileNetV1 model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # Drop the variable and its optimizer slots so the final report only
        # lists genuinely uncopied weights.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)


class MobileNetV1ConvLayer(nn.Module):
    """Conv2d + optional BatchNorm + optional activation, with TF "SAME" padding support."""

    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With tf_padding the padding is applied dynamically in forward() instead.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    """Handles weight initialization and pretrained-model loading for MobileNetV1."""

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize conv/linear weights normally and batch-norm to identity."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            # Channel count doubles at every stride-2 block (and the first block).
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 followed by pointwise 1x1 convolution.
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype/shape, then cache it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
369
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _A : Optional[Any] =logging.get_logger(__name__) _A : List[str] ={ '''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''', # See all Marian models at https://huggingface.co/models?filter=marian } class _lowercase ( _lowercase ): a = """marian""" a = ["""past_key_values"""] a = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=58_101 , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Any=12 , UpperCamelCase__: Optional[int]=4_096 , UpperCamelCase__: Tuple=16 , UpperCamelCase__: Dict=12 , UpperCamelCase__: Optional[Any]=4_096 , UpperCamelCase__: Any=16 , UpperCamelCase__: List[str]=0.0 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: str=True , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Optional[int]="gelu" , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Optional[int]=0.1 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: Optional[int]=0.02 , UpperCamelCase__: str=58_100 , UpperCamelCase__: Tuple=False , UpperCamelCase__: Optional[Any]=58_100 , UpperCamelCase__: int=0 , UpperCamelCase__: Union[str, Any]=0 , UpperCamelCase__: List[str]=True , **UpperCamelCase__: str , ): lowerCamelCase__ : int = vocab_size lowerCamelCase__ : Tuple = decoder_vocab_size or vocab_size lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[Any] = d_model lowerCamelCase__ : int = encoder_ffn_dim 
lowerCamelCase__ : Union[str, Any] = encoder_layers lowerCamelCase__ : Dict = encoder_attention_heads lowerCamelCase__ : Optional[int] = decoder_ffn_dim lowerCamelCase__ : List[str] = decoder_layers lowerCamelCase__ : Dict = decoder_attention_heads lowerCamelCase__ : int = dropout lowerCamelCase__ : str = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : List[str] = activation_function lowerCamelCase__ : Union[str, Any] = init_std lowerCamelCase__ : str = encoder_layerdrop lowerCamelCase__ : Any = decoder_layerdrop lowerCamelCase__ : List[str] = use_cache lowerCamelCase__ : List[str] = encoder_layers lowerCamelCase__ : int = scale_embedding # scale factor will be sqrt(d_model) if True lowerCamelCase__ : str = share_encoder_decoder_embeddings super().__init__( pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , ) class _lowercase ( _lowercase ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase_ ( self: Union[str, Any] ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : List[str] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: lowerCamelCase__ : Dict = {0: """batch"""} lowerCamelCase__ : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: lowerCamelCase__ : Any = {0: """batch""", 1: """decoder_sequence"""} lowerCamelCase__ : Dict = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCamelCase__ : Union[str, Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(UpperCamelCase__ ): lowerCamelCase__ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""} lowerCamelCase__ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""} else: lowerCamelCase__ : int = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase_ ( self: Optional[Any] ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super().outputs else: lowerCamelCase__ : Any = super(UpperCamelCase__ , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers for i in range(UpperCamelCase__ ): lowerCamelCase__ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""} lowerCamelCase__ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def lowerCamelCase_ ( self: str , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ): lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Generate decoder inputs lowerCamelCase__ : Any = seq_length if not self.use_past else 1 lowerCamelCase__ : Optional[Any] = 
self._generate_dummy_inputs_for_encoder_and_decoder( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : str = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[int] = dict(**UpperCamelCase__ , **UpperCamelCase__ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = common_inputs["""input_ids"""].shape lowerCamelCase__ : Tuple = common_inputs["""decoder_input_ids"""].shape[1] lowerCamelCase__ , lowerCamelCase__ : List[str] = self.num_attention_heads lowerCamelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Tuple = decoder_seq_length + 3 lowerCamelCase__ : int = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : Optional[int] = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 ) lowerCamelCase__ : Any = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : Any = self.num_layers lowerCamelCase__ : str = min(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : str = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers lowerCamelCase__ : int = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(UpperCamelCase__ ): common_inputs["past_key_values"].append( ( torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ ), ) ) # TODO: test this. 
lowerCamelCase__ : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(UpperCamelCase__ , UpperCamelCase__ ): common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) ) return common_inputs def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ): lowerCamelCase__ : Any = self._generate_dummy_inputs_for_encoder_and_decoder( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ : Any = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ : Optional[Any] = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Dict = self.num_layers lowerCamelCase__ , lowerCamelCase__ : Dict = self.num_attention_heads lowerCamelCase__ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[Any] = common_inputs["""attention_mask"""].dtype lowerCamelCase__ : int = torch.cat( [common_inputs["""attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) lowerCamelCase__ : int = [ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ ) ] return common_inputs def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use 
super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCamelCase__ : List[Any] = compute_effective_axis_dimension( UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : Union[str, Any] = tokenizer.num_special_tokens_to_add(UpperCamelCase__ ) lowerCamelCase__ : Any = compute_effective_axis_dimension( UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Union[str, Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : str = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) ) return common_inputs def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) else: lowerCamelCase__ : Tuple = self._generate_dummy_inputs_for_causal_lm( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) return common_inputs def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any] ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: lowerCamelCase__ : List[Any] = super(UpperCamelCase__ , self )._flatten_past_key_values_( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @property def lowerCamelCase_ ( self: Union[str, Any] ): return 1e-4
129
0
"""simple docstring""" class UpperCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str = "" , SCREAMING_SNAKE_CASE_ : bool = False ): # Mapping from the first character of the prefix of the node lowerCAmelCase_ : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word lowerCAmelCase_ : str = is_leaf lowerCAmelCase_ : Optional[int] = prefix def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ): lowerCAmelCase_ : Any = 0 for q, w in zip(self.prefix , SCREAMING_SNAKE_CASE_ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : list[str] ): for word in words: self.insert(SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ): # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf if self.prefix == word: lowerCAmelCase_ : Union[str, Any] = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: lowerCAmelCase_ : Union[str, Any] = RadixNode(prefix=SCREAMING_SNAKE_CASE_ , is_leaf=SCREAMING_SNAKE_CASE_ ) else: lowerCAmelCase_ : Dict = self.nodes[word[0]] lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : List[str] = incoming_node.match( SCREAMING_SNAKE_CASE_ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE_ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: lowerCAmelCase_ : str = remaining_prefix lowerCAmelCase_ : Optional[Any] = self.nodes[matching_string[0]] lowerCAmelCase_ : Union[str, Any] = 
RadixNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Optional[Any] = aux_node if remaining_word == "": lowerCAmelCase_ : List[str] = True else: self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : str ): lowerCAmelCase_ : int = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE_ ) if not incoming_node: return False else: lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : List[str] = incoming_node.match( SCREAMING_SNAKE_CASE_ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ): lowerCAmelCase_ : Tuple = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE_ ) if not incoming_node: return False else: lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Dict = incoming_node.match( SCREAMING_SNAKE_CASE_ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(SCREAMING_SNAKE_CASE_ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: lowerCAmelCase_ : Optional[int] = list(self.nodes.values() )[0] lowerCAmelCase_ : Any = merging_node.is_leaf self.prefix += merging_node.prefix lowerCAmelCase_ : Optional[Any] = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: lowerCAmelCase_ : 
Optional[int] = False # If there is 1 edge, we merge it with its child else: lowerCAmelCase_ : str = list(incoming_node.nodes.values() )[0] lowerCAmelCase_ : int = merging_node.is_leaf incoming_node.prefix += merging_node.prefix lowerCAmelCase_ : str = merging_node.nodes return True def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : int = 0 ): if self.prefix != "": print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' ) for value in self.nodes.values(): value.print_tree(height + 1 ) def UpperCamelCase_ ( ) -> bool: """simple docstring""" lowerCAmelCase_ : int = 'banana bananas bandana band apple all beast'.split() lowerCAmelCase_ : List[str] = RadixNode() root.insert_many(lowerCAmelCase__ ) assert all(root.find(lowerCAmelCase__ ) for word in words ) assert not root.find('bandanas' ) assert not root.find('apps' ) root.delete('all' ) assert not root.find('all' ) root.delete('banana' ) assert not root.find('banana' ) assert root.find('bananas' ) return True def UpperCamelCase_ ( ) -> None: """simple docstring""" assert test_trie() def UpperCamelCase_ ( ) -> None: """simple docstring""" lowerCAmelCase_ : Any = RadixNode() lowerCAmelCase_ : List[Any] = 'banana bananas bandanas bandana band apple all beast'.split() root.insert_many(lowerCAmelCase__ ) print('Words:' , lowerCAmelCase__ ) print('Tree:' ) root.print_tree() if __name__ == "__main__": main()
224
"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowercase__ : List[Any] = logging.getLogger(__name__) def UpperCamelCase_ ( ) -> Dict: """simple docstring""" lowerCAmelCase_ : Tuple = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=lowerCAmelCase__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=lowerCAmelCase__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=lowerCAmelCase__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=lowerCAmelCase__ , default=1000 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=lowerCAmelCase__ , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=lowerCAmelCase__ , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=lowerCAmelCase__ , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' 
, ) lowerCAmelCase_ : List[Any] = parser.parse_args() return args def UpperCamelCase_ ( lowerCAmelCase__ : int ) -> Union[str, Any]: """simple docstring""" def fn(lowerCAmelCase__ : Optional[Any] ): return tokenizer(examples['text'] ) return fn def UpperCamelCase_ ( lowerCAmelCase__ : Tuple ) -> Dict: """simple docstring""" lowerCAmelCase_ : int = [] for i in range(len(tokenized_data['input_ids'] ) ): lowerCAmelCase_ : Tuple = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } lowerCAmelCase_ : Union[str, Any] = tf.train.Features(feature=lowerCAmelCase__ ) lowerCAmelCase_ : Dict = tf.train.Example(features=lowerCAmelCase__ ) lowerCAmelCase_ : Optional[int] = example.SerializeToString() records.append(lowerCAmelCase__ ) return records def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase_ : str = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: lowerCAmelCase_ : Tuple = min(len(lowerCAmelCase__ ) , args.limit ) lowerCAmelCase_ : Any = dataset.select(range(lowerCAmelCase__ ) ) print(f"Limiting the dataset to {args.limit} entries." ) lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) lowerCAmelCase_ : int = os.path.join(args.output_dir , args.split ) if not os.path.exists(lowerCAmelCase__ ): os.makedirs(lowerCAmelCase__ ) else: lowerCAmelCase_ : Dict = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
lowerCAmelCase_ : Dict = tokenize_function(lowerCAmelCase__ ) lowerCAmelCase_ : List[Any] = dataset.map(lowerCAmelCase__ , batched=lowerCAmelCase__ , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowerCAmelCase__ : List[Any] ): # Concatenate all texts. lowerCAmelCase_ : int = {k: sum(examples[k] , [] ) for k in examples.keys()} lowerCAmelCase_ : Any = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 lowerCAmelCase_ : Union[str, Any] = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. 
lowerCAmelCase_ : Optional[Any] = { k: [t[i : i + args.max_length] for i in range(0 , lowerCAmelCase__ , args.max_length )] for k, t in concatenated_examples.items() } return result lowerCAmelCase_ : Optional[int] = dataset_tokenized.map(lowerCAmelCase__ , batched=lowerCAmelCase__ , batch_size=1000 , num_proc=4 ) lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : Optional[Any] = 0 for shard in range(0 , len(lowerCAmelCase__ ) , args.shard_size ): lowerCAmelCase_ : Dict = grouped_dataset[shard : shard + args.shard_size] lowerCAmelCase_ : Tuple = len(dataset_snapshot['input_ids'] ) lowerCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase__ , f"dataset-{shard_count}-{records_containing}.tfrecord" ) lowerCAmelCase_ : Tuple = get_serialized_examples(lowerCAmelCase__ ) with tf.io.TFRecordWriter(lowerCAmelCase__ ) as out_file: for i in range(len(lowerCAmelCase__ ) ): lowerCAmelCase_ : Dict = serialized_examples[i] out_file.write(lowerCAmelCase__ ) print('Wrote file {} containing {} records'.format(lowerCAmelCase__ , lowerCAmelCase__ ) ) shard_count += 1 total_records += records_containing with open(f"split-{args.split}-records-count.txt" , 'w' ) as f: print(f"Total {args.split} records: {total_records}" , file=lowerCAmelCase__ ) if __name__ == "__main__": lowercase__ : int = parse_args() main(args)
224
1
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase : int = 1_6 UpperCamelCase : str = 3_2 def A ( snake_case :Accelerator , snake_case :int = 1_6 ) -> Optional[int]: __UpperCamelCase = AutoTokenizer.from_pretrained('bert-base-cased' ) __UpperCamelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(snake_case :Dict ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __UpperCamelCase = 
datasets.map( snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case :Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. __UpperCamelCase = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __UpperCamelCase = 1_6 elif accelerator.mixed_precision != "no": __UpperCamelCase = 8 else: __UpperCamelCase = None return tokenizer.pad( snake_case , padding='longest' , max_length=snake_case , pad_to_multiple_of=snake_case , return_tensors='pt' , ) # Instantiate dataloaders. __UpperCamelCase = DataLoader( tokenized_datasets['train'] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) __UpperCamelCase = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase : Optional[int] = mocked_dataloaders # noqa: F811 def A ( snake_case :Optional[Any] , snake_case :Dict ) -> Any: # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , snake_case ) == "1": __UpperCamelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: __UpperCamelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , 
project_dir=args.project_dir ) else: __UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase = config['lr'] __UpperCamelCase = int(config['num_epochs'] ) __UpperCamelCase = int(config['seed'] ) __UpperCamelCase = int(config['batch_size'] ) set_seed(snake_case ) __UpperCamelCase , __UpperCamelCase = get_dataloaders(snake_case , snake_case ) __UpperCamelCase = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation __UpperCamelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE __UpperCamelCase = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __UpperCamelCase = model.to(accelerator.device ) # Instantiate optimizer __UpperCamelCase = AdamW(params=model.parameters() , lr=snake_case ) # Instantiate scheduler __UpperCamelCase = get_linear_schedule_with_warmup( optimizer=snake_case , num_warmup_steps=1_0_0 , num_training_steps=(len(snake_case ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( snake_case , snake_case , snake_case , snake_case , snake_case ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: __UpperCamelCase = os.path.split(snake_case )[-1].split('.' )[0] accelerator.init_trackers(snake_case , snake_case ) # Now we train the model for epoch in range(snake_case ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: __UpperCamelCase = 0 for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __UpperCamelCase = model(**snake_case ) __UpperCamelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() __UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase = model(**snake_case ) __UpperCamelCase = outputs.logits.argmax(dim=-1 ) __UpperCamelCase , __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=snake_case , references=snake_case , ) __UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(f'epoch {epoch}:' , snake_case ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { 'accuracy': eval_metric['accuracy'], 'f1': eval_metric['f1'], 'train_loss': total_loss.item() / len(snake_case ), 'epoch': epoch, } , step=snake_case , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def A ( ) -> Tuple: __UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=snake_case , default=snake_case , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=snake_case , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6} training_function(snake_case , snake_case ) if __name__ == "__main__": main()
263
"""simple docstring""" UpperCamelCase : Union[str, Any] = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def A ( snake_case :Dict , snake_case :Tuple , snake_case :str , snake_case :Optional[int] ) -> Union[str, Any]: # Return True if there is node that has not iterated. __UpperCamelCase = [False] * len(snake_case ) __UpperCamelCase = [s] __UpperCamelCase = True while queue: __UpperCamelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(snake_case ) __UpperCamelCase = True __UpperCamelCase = u return visited[t] def A ( snake_case :int , snake_case :Any , snake_case :Union[str, Any] ) -> Optional[int]: __UpperCamelCase = [-1] * (len(snake_case )) __UpperCamelCase = 0 __UpperCamelCase = [] __UpperCamelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(snake_case , snake_case , snake_case , snake_case ): __UpperCamelCase = float('Inf' ) __UpperCamelCase = sink while s != source: # Find the minimum value in select path __UpperCamelCase = min(snake_case , graph[parent[s]][s] ) __UpperCamelCase = parent[s] max_flow += path_flow __UpperCamelCase = sink while v != source: __UpperCamelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __UpperCamelCase = parent[v] for i in range(len(snake_case ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
263
1
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _SCREAMING_SNAKE_CASE: def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=10 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=32 * 4 ,SCREAMING_SNAKE_CASE__=32 * 6 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=32 ,) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE :List[Any] = parent __SCREAMING_SNAKE_CASE :int = batch_size __SCREAMING_SNAKE_CASE :List[str] = is_training __SCREAMING_SNAKE_CASE :Tuple = use_auxiliary_loss __SCREAMING_SNAKE_CASE :str = num_queries __SCREAMING_SNAKE_CASE :int = num_channels __SCREAMING_SNAKE_CASE :List[str] = min_size __SCREAMING_SNAKE_CASE :Optional[int] = max_size __SCREAMING_SNAKE_CASE :Union[str, Any] = num_labels __SCREAMING_SNAKE_CASE :str = mask_feature_size def _UpperCamelCase ( self ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Any = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Tuple = 
( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=SCREAMING_SNAKE_CASE__ ) > 0.5 ).float() __SCREAMING_SNAKE_CASE :List[str] = (torch.rand((self.batch_size, self.num_labels) ,device=SCREAMING_SNAKE_CASE__ ) > 0.5).long() __SCREAMING_SNAKE_CASE :str = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig( decoder_ffn_dim=1_28 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,) def _UpperCamelCase ( self ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE :Tuple = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE :Any = output.encoder_hidden_states __SCREAMING_SNAKE_CASE :Tuple = output.pixel_decoder_hidden_states __SCREAMING_SNAKE_CASE :Optional[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) ,config.decoder_config.decoder_layers ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __SCREAMING_SNAKE_CASE :Any 
= MaskFormerModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __SCREAMING_SNAKE_CASE :Tuple = model(pixel_values=SCREAMING_SNAKE_CASE__ ,pixel_mask=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Any = model(SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE :Tuple = MaskFormerForInstanceSegmentation(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE__ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE :Optional[Any] = 
model(pixel_values=SCREAMING_SNAKE_CASE__ ,pixel_mask=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Dict = model(SCREAMING_SNAKE_CASE__ ) comm_check_on_output(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Union[str, Any] = model( pixel_values=SCREAMING_SNAKE_CASE__ ,pixel_mask=SCREAMING_SNAKE_CASE__ ,mask_labels=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__ ) comm_check_on_output(SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class _SCREAMING_SNAKE_CASE( A , A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Optional[int] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () SCREAMING_SNAKE_CASE_ : int = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : Dict = False SCREAMING_SNAKE_CASE_ : Dict = False SCREAMING_SNAKE_CASE_ : Union[str, Any] = False def _UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[int] = MaskFormerModelTester(self ) __SCREAMING_SNAKE_CASE :Optional[Any] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _UpperCamelCase ( self ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE :Tuple = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE__ ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def _UpperCamelCase ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def _UpperCamelCase ( self ) -> Dict: """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def _UpperCamelCase ( self ) -> int: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" pass def _UpperCamelCase ( self ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE :Tuple = model_class(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE :Optional[Any] = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE :str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__ ) @slow def _UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: __SCREAMING_SNAKE_CASE :Union[str, Any] = MaskFormerModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> str: 
"""simple docstring""" __SCREAMING_SNAKE_CASE :Any = (self.model_tester.min_size,) * 2 __SCREAMING_SNAKE_CASE :List[Any] = { '''pixel_values''': torch.randn((2, 3, *size) ,device=SCREAMING_SNAKE_CASE__ ), '''mask_labels''': torch.randn((2, 10, *size) ,device=SCREAMING_SNAKE_CASE__ ), '''class_labels''': torch.zeros(2 ,10 ,device=SCREAMING_SNAKE_CASE__ ).long(), } __SCREAMING_SNAKE_CASE :List[str] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :List[str] = model(**SCREAMING_SNAKE_CASE__ ) self.assertTrue(outputs.loss is not None ) def _UpperCamelCase ( self ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE :Optional[int] = model_class(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Any = model(**SCREAMING_SNAKE_CASE__ ,output_attentions=SCREAMING_SNAKE_CASE__ ) self.assertTrue(outputs.attentions is not None ) def _UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss __SCREAMING_SNAKE_CASE :int = self.all_model_classes[1] __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE :Tuple = model_class(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.train() __SCREAMING_SNAKE_CASE :Optional[int] = model(SCREAMING_SNAKE_CASE__ 
,mask_labels=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__ ).loss loss.backward() def _UpperCamelCase ( self ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :Tuple = self.all_model_classes[1] __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE :Tuple = True __SCREAMING_SNAKE_CASE :Union[str, Any] = True __SCREAMING_SNAKE_CASE :Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.train() __SCREAMING_SNAKE_CASE :str = model(SCREAMING_SNAKE_CASE__ ,mask_labels=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :List[str] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __SCREAMING_SNAKE_CASE :int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't __SCREAMING_SNAKE_CASE :str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __SCREAMING_SNAKE_CASE :Union[str, Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCamelCase_ = 1e-4 def __lowerCamelCase ( ) -> Dict: __SCREAMING_SNAKE_CASE :Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class _SCREAMING_SNAKE_CASE( unittest.TestCase ): @cached_property def _UpperCamelCase ( self ) -> List[str]: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def 
_UpperCamelCase ( self ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :List[str] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Any = self.default_image_processor __SCREAMING_SNAKE_CASE :List[str] = prepare_img() __SCREAMING_SNAKE_CASE :Dict = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Tuple = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE__ ,(1, 3, 8_00, 10_88) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE :Dict = model(**SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[Any] = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) ) __SCREAMING_SNAKE_CASE :Optional[int] = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) ) __SCREAMING_SNAKE_CASE :List[str] = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) ) def _UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE :int = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) 
.to(SCREAMING_SNAKE_CASE__ ) .eval() ) __SCREAMING_SNAKE_CASE :List[Any] = self.default_image_processor __SCREAMING_SNAKE_CASE :List[str] = prepare_img() __SCREAMING_SNAKE_CASE :List[str] = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE__ ,(1, 3, 8_00, 10_88) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE :Dict = model(**SCREAMING_SNAKE_CASE__ ) # masks_queries_logits __SCREAMING_SNAKE_CASE :Optional[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) __SCREAMING_SNAKE_CASE :Union[str, Any] = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] __SCREAMING_SNAKE_CASE :str = torch.tensor(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) ) # class_queries_logits __SCREAMING_SNAKE_CASE :Optional[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) __SCREAMING_SNAKE_CASE :Union[str, Any] = torch.tensor( [ [1.6_512E00, -5.2_572E00, -3.3_519E00], [3.6_169E-02, -5.9_025E00, -2.9_313E00], [1.0_766E-04, -7.7_630E00, -5.1_263E00], ] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) ) def _UpperCamelCase ( self ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE :Any = ( 
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(SCREAMING_SNAKE_CASE__ ) .eval() ) __SCREAMING_SNAKE_CASE :str = self.default_image_processor __SCREAMING_SNAKE_CASE :Tuple = prepare_img() __SCREAMING_SNAKE_CASE :int = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Union[str, Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE__ ,(1, 3, 8_00, 10_88) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE :Tuple = model(**SCREAMING_SNAKE_CASE__ ) # masks_queries_logits __SCREAMING_SNAKE_CASE :Any = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) __SCREAMING_SNAKE_CASE :int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] __SCREAMING_SNAKE_CASE :Any = torch.tensor(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) ) # class_queries_logits __SCREAMING_SNAKE_CASE :Any = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) __SCREAMING_SNAKE_CASE :int = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) ) def _UpperCamelCase ( self ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE :Dict = ( 
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(SCREAMING_SNAKE_CASE__ ) .eval() ) __SCREAMING_SNAKE_CASE :Dict = self.default_image_processor __SCREAMING_SNAKE_CASE :Any = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] ,segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] ,return_tensors='''pt''' ,) __SCREAMING_SNAKE_CASE :Any = inputs['''pixel_values'''].to(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :str = [el.to(SCREAMING_SNAKE_CASE__ ) for el in inputs['''mask_labels''']] __SCREAMING_SNAKE_CASE :List[str] = [el.to(SCREAMING_SNAKE_CASE__ ) for el in inputs['''class_labels''']] with torch.no_grad(): __SCREAMING_SNAKE_CASE :Any = model(**SCREAMING_SNAKE_CASE__ ) self.assertTrue(outputs.loss is not None )
191
"""simple docstring""" from typing import Any def __lowerCamelCase ( a_ : list ) -> list[Any]: if not input_list: return [] __SCREAMING_SNAKE_CASE :int = [input_list.count(a_ ) for value in input_list] __SCREAMING_SNAKE_CASE :str = max(a_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(a_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
191
1
"""Manhattan (taxicab) distance between two points in n-dimensional space."""


def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan distance between ``point_a`` and ``point_b``.

    Raises:
        ValueError: if a point is empty or the dimensions differ.
        TypeError: if a point is not a list of numbers.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise unless ``point`` is a non-empty list of ints/floats."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Single-expression variant of :func:`manhattan_distance` (same contract)."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
152
"""Create a smaller "student" seq2seq model by copying selected layers of a teacher."""
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy ``src_layers[i]`` for every i in ``layers_to_copy`` into ``dest_layers`` (in order)."""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    """Choose which teacher layers to copy into an ``n_student``-layer student."""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    """Return which teacher layers should supervise each student layer."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student with ``e`` encoder / ``d`` decoder layers copied from ``teacher``.

    ``None`` for ``e`` or ``d`` keeps the teacher's layer count. The student is
    saved to ``save_path``. Returns (student, copied encoder ids, copied decoder ids).
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # NOTE(review): record provenance on the config (assumed target of the
    # original dict assignment, which was otherwise dead code) — confirm.
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
152
1
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Naive O(n^2): for each element, scan the rest for the next greater value.

    -1 marks elements with no greater element to their right.
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same contract as the slow version, iterating with enumerate and slices."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack implementation of next-greater-element."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    # Walk right-to-left, keeping a stack of candidates in decreasing order.
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
0
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class lowercase_(unittest.TestCase):
    """Round-trip and behavior tests for ``VisionTextDualEncoderProcessor``."""

    def setUp(self):
        """Write a minimal BERT vocab and a ViT image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Load the slow BERT tokenizer written by ``setUp``."""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """Load the ViT image processor written by ``setUp``."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moveaxis converts CHW -> HWC)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        # NOTE(review): do_normalize=False restored from the upstream test — confirm.
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        """Processor(images=...) must match calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """Processor(text=...) must match calling the tokenizer directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
0
1
def permute(nums: list[int]) -> list[list[int]]:
    """Return every permutation of *nums*.

    Recursive strategy: repeatedly remove the head element, permute the
    remainder, then append the removed element to each sub-permutation.
    The input list is restored before returning.

    >>> sorted(permute([1, 2, 3]))
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        head = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(head)
        result.extend(permutations)
        nums.append(head)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return every permutation of *nums* via in-place swap backtracking.

    >>> sorted(permute2([1, 2, 3]))
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    """

    def backtrack(start: int) -> None:
        # Positions [0, start) are fixed; permute the remaining suffix.
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
355
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when *positive_integer* is a "perfect" partition value.

    k is perfect when sqrt(4*k + 1)/2 + 1/2 is an exact power of two,
    i.e. the derived base-2 exponent is integral.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition value at which the proportion of perfect
    partitions falls strictly below *max_proportion*.

    Candidates are k = (n**2 - 1) / 4 for n = 3, 4, ...; only integral
    candidates count as partitions (Project Euler 207 style search).
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
116
0
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def info_command_factory(_):
    """Build the command with no accelerate config file (plain ``env``)."""
    return EnvironmentCommand()


def download_command_factory(args):
    """Build the command from parsed CLI args (``--accelerate-config_file``)."""
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    """``transformers-cli env``: print version/platform info for bug reports."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        # The later set_defaults wins: the factory that reads the parsed args.
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        """Collect environment info, print it in issue-template form, return the dict."""
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Render an info dict as a markdown bullet list."""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
10
"""simple docstring""" import colorsys from PIL import Image # type: ignore def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float: SCREAMING_SNAKE_CASE__ : Union[str, Any] = x SCREAMING_SNAKE_CASE__ : Union[str, Any] = y for step in range(__lowerCAmelCase ): # noqa: B007 SCREAMING_SNAKE_CASE__ : str = a * a - b * b + x SCREAMING_SNAKE_CASE__ : Dict = 2 * a * b + y SCREAMING_SNAKE_CASE__ : Dict = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _lowercase ( __lowerCAmelCase ) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def _lowercase ( __lowerCAmelCase ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__lowerCAmelCase , 1 , 1 ) ) def _lowercase ( __lowerCAmelCase = 800 , __lowerCAmelCase = 600 , __lowerCAmelCase = -0.6 , __lowerCAmelCase = 0 , __lowerCAmelCase = 3.2 , __lowerCAmelCase = 50 , __lowerCAmelCase = True , ) -> Image.Image: SCREAMING_SNAKE_CASE__ : int = Image.new("""RGB""" , (image_width, image_height) ) SCREAMING_SNAKE_CASE__ : Tuple = img.load() # loop through the image-coordinates for image_x in range(__lowerCAmelCase ): for image_y in range(__lowerCAmelCase ): # determine the figure-coordinates based on the image-coordinates SCREAMING_SNAKE_CASE__ : str = figure_width / image_width * image_height SCREAMING_SNAKE_CASE__ : int = figure_center_x + (image_x / image_width - 0.5) * figure_width SCREAMING_SNAKE_CASE__ : Any = figure_center_y + (image_y / image_height - 0.5) * figure_height SCREAMING_SNAKE_CASE__ : Optional[int] = get_distance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_color_coded_rgb(__lowerCAmelCase ) else: SCREAMING_SNAKE_CASE__ : Dict = 
get_black_and_white_rgb(__lowerCAmelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure a :List[str] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
132
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    """Configuration for DETR-style Table Transformer detection models."""

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        # A timm backbone and an HF backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Table Transformer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
359
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

# Smallest value considered when sampling primitive roots / private keys.
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Sample a primitive root modulo the prime *p_val*.

    NOTE(review): the two modular-exponentiation screens only reject obvious
    non-generators; they do not fully prove primitivity — confirm intent.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair.

    Returns ``((key_size, g, g^d mod p, p), (key_size, d))`` where ``p`` is a
    large prime, ``g`` a primitive root and ``d`` the private exponent.
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if either exists."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2_0_4_8)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
113
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '''▁''' __A = {'''vocab_file''': '''sentencepiece.bpe.model'''} __A = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } __A = { '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off __A = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', 
'''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class _snake_case ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_VOCAB_FILES_MAP 
snake_case__ = ["input_ids", "attention_mask"] snake_case__ = [] snake_case__ = [] def __init__( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any]="<s>" , UpperCAmelCase : Union[str, Any]="</s>" , UpperCAmelCase : Any="</s>" , UpperCAmelCase : str="<s>" , UpperCAmelCase : Any="<unk>" , UpperCAmelCase : Any="<pad>" , UpperCAmelCase : List[Any]="<mask>" , UpperCAmelCase : Any=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[Dict[str, Any]] = None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : List[str] , ): # Mask token behave like a normal word, i.e. include the space before it __lowerCamelCase : Dict = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token __lowerCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs __lowerCamelCase : int = legacy_behaviour super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenizer_file=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCAmelCase , **UpperCAmelCase , ) __lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase ) ) __lowerCamelCase : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id 
alignment for the first 4 token __lowerCamelCase : List[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowerCamelCase : Union[str, Any] = 1 __lowerCamelCase : Any = len(self.sp_model ) __lowerCamelCase : Any = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCAmelCase ) } __lowerCamelCase : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()} __lowerCamelCase : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __lowerCamelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __lowerCamelCase : List[Any] = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) __lowerCamelCase : Dict = src_lang if src_lang is not None else "eng_Latn" __lowerCamelCase : List[Any] = self.lang_code_to_id[self._src_lang] __lowerCamelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : str ): __lowerCamelCase : List[str] = self.__dict__.copy() __lowerCamelCase : str = None __lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[str] , UpperCAmelCase : Any ): __lowerCamelCase : Optional[int] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __lowerCamelCase : str = {} __lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowerCamelCase__ ( self : List[str] ): return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def 
lowerCamelCase__ ( self : Optional[Any] ): return self._src_lang @src_lang.setter def lowerCamelCase__ ( self : Any , UpperCAmelCase : str ): __lowerCamelCase : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) __lowerCamelCase : Tuple = [1] * len(self.prefix_tokens ) __lowerCamelCase : Optional[Any] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(UpperCAmelCase )) + ([0] * len(UpperCAmelCase )) + suffix_ones def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): __lowerCamelCase : Dict = [self.sep_token_id] __lowerCamelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] , UpperCAmelCase : Optional[str] , **UpperCAmelCase : Optional[int] ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) __lowerCamelCase : Union[str, Any] = src_lang __lowerCamelCase : Optional[int] = self(UpperCAmelCase , 
add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) __lowerCamelCase : Dict = self.convert_tokens_to_ids(UpperCAmelCase ) __lowerCamelCase : Union[str, Any] = tgt_lang_id return inputs def lowerCamelCase__ ( self : str ): __lowerCamelCase : Dict = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : str ): return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Optional[Any] ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowerCamelCase : Optional[Any] = self.sp_model.PieceToId(UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Any ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ): __lowerCamelCase : Union[str, Any] = "".join(UpperCAmelCase ).replace(UpperCAmelCase , " " ).strip() return out_string def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCamelCase : Tuple = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase , "wb" ) as fi: __lowerCamelCase : Optional[Any] = 
self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) return (out_vocab_file,) def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str = "eng_Latn" , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "fra_Latn" , **UpperCAmelCase : Optional[Any] , ): __lowerCamelCase : str = src_lang __lowerCamelCase : Any = tgt_lang return super().prepare_seqaseq_batch(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def lowerCamelCase__ ( self : Dict ): return self.set_src_lang_special_tokens(self.src_lang ) def lowerCamelCase__ ( self : int ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[str] ): __lowerCamelCase : Dict = self.lang_code_to_id[src_lang] if self.legacy_behaviour: __lowerCamelCase : Union[str, Any] = [] __lowerCamelCase : Any = [self.eos_token_id, self.cur_lang_code] else: __lowerCamelCase : Dict = [self.cur_lang_code] __lowerCamelCase : Optional[Any] = [self.eos_token_id] def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : str ): __lowerCamelCase : List[Any] = self.lang_code_to_id[lang] if self.legacy_behaviour: __lowerCamelCase : Any = [] __lowerCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code] else: __lowerCamelCase : Dict = [self.cur_lang_code] __lowerCamelCase : Any = [self.eos_token_id]
135
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __A = logging.get_logger(__name__) class _snake_case ( a__ ): snake_case__ = ["input_features", "attention_mask"] def __init__( self : Union[str, Any] , UpperCAmelCase : Tuple=80 , UpperCAmelCase : Tuple=16000 , UpperCAmelCase : Any=80 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Tuple=True , **UpperCAmelCase : Optional[int] , ): super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase ) __lowerCamelCase : str = num_mel_bins __lowerCamelCase : Tuple = do_ceptral_normalize __lowerCamelCase : Dict = normalize_means __lowerCamelCase : str = normalize_vars __lowerCamelCase : Optional[int] = True def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : np.ndarray , ): __lowerCamelCase : Any = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __lowerCamelCase : Optional[int] = torch.from_numpy(UpperCAmelCase ).unsqueeze(0 ) __lowerCamelCase : str = ta_kaldi.fbank(UpperCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def lowerCamelCase__ ( UpperCAmelCase : np.ndarray , UpperCAmelCase : int , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: __lowerCamelCase : Any = x[:input_length].mean(axis=0 ) __lowerCamelCase : Optional[int] = np.subtract(UpperCAmelCase , UpperCAmelCase ) if normalize_vars: __lowerCamelCase : int = x[:input_length].std(axis=0 ) __lowerCamelCase : Union[str, Any] = 
np.divide(UpperCAmelCase , UpperCAmelCase ) if input_length < x.shape[0]: __lowerCamelCase : Any = padding_value # make sure array is in float32 __lowerCamelCase : List[str] = x.astype(np.floataa ) return x def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : List[np.ndarray] , UpperCAmelCase : Optional[np.ndarray] = None ): __lowerCamelCase : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(UpperCAmelCase , UpperCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(UpperCAmelCase , UpperCAmelCase ) ] def __call__( self : Optional[Any] , UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , **UpperCAmelCase : Dict , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) __lowerCamelCase : Optional[int] = isinstance(UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) __lowerCamelCase : Tuple = is_batched_numpy or ( isinstance(UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowerCamelCase : Dict = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase , np.ndarray ): __lowerCamelCase : Optional[int] = np.asarray(UpperCAmelCase , dtype=np.floataa ) elif isinstance(UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowerCamelCase : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowerCamelCase : Optional[int] = [raw_speech] # extract fbank features __lowerCamelCase : Optional[Any] = [self._extract_fbank_features(UpperCAmelCase ) for waveform in raw_speech] # convert into correct format for padding __lowerCamelCase : Dict = BatchFeature({"input_features": features} ) __lowerCamelCase : Optional[Any] = self.pad( UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , ) # make sure list is in array format __lowerCamelCase : Tuple = padded_inputs.get("input_features" ) if isinstance(input_features[0] , UpperCAmelCase ): __lowerCamelCase : List[str] = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for feature in input_features] __lowerCamelCase : Optional[int] = padded_inputs.get("attention_mask" ) if attention_mask is not None: __lowerCamelCase : Union[str, Any] = [np.asarray(UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __lowerCamelCase : Optional[int] 
= ( np.array(UpperCAmelCase , dtype=np.intaa ) if self._get_padding_strategies(UpperCAmelCase , max_length=UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __lowerCamelCase : Optional[int] = self.normalize( padded_inputs["input_features"] , attention_mask=UpperCAmelCase ) if return_tensors is not None: __lowerCamelCase : Optional[Any] = padded_inputs.convert_to_tensors(UpperCAmelCase ) return padded_inputs
135
1
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __UpperCamelCase = data_utils.TransfoXLTokenizer __UpperCamelCase = data_utils.TransfoXLCorpus __UpperCamelCase = data_utils __UpperCamelCase = data_utils def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(UpperCAmelCase , 'rb' ) as fp: snake_case_ = pickle.load(UpperCAmelCase , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) snake_case_ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(f'Save vocabulary to {pytorch_vocab_dump_path}' ) snake_case_ = corpus.vocab.__dict__ torch.save(UpperCAmelCase , UpperCAmelCase ) snake_case_ = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , UpperCAmelCase ) snake_case_ = pytorch_dump_folder_path + '/' + CORPUS_NAME print(f'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(UpperCAmelCase , UpperCAmelCase ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model snake_case_ = os.path.abspath(UpperCAmelCase ) snake_case_ = os.path.abspath(UpperCAmelCase ) print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' 
) # Initialise PyTorch model if transfo_xl_config_file == "": snake_case_ = TransfoXLConfig() else: snake_case_ = TransfoXLConfig.from_json_file(UpperCAmelCase ) print(f'Building PyTorch model from configuration: {config}' ) snake_case_ = TransfoXLLMHeadModel(UpperCAmelCase ) snake_case_ = load_tf_weights_in_transfo_xl(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Save pytorch-model snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) print(f'Save PyTorch model to {os.path.abspath(UpperCAmelCase )}' ) torch.save(model.state_dict() , UpperCAmelCase ) print(f'Save configuration file to {os.path.abspath(UpperCAmelCase )}' ) with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) __UpperCamelCase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
354
"""simple docstring""" import os import numpy import onnx def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = os.path.dirname(UpperCAmelCase ) snake_case_ = os.path.basename(UpperCAmelCase ) snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: 
continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCAmelCase ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
312
0
'''simple docstring'''
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# FIX: these constants were all assigned to the single name `_lowercase`; the
# `direct_transformers_import(TRANSFORMERS_PATH)` call below pins the real names.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


# FIX: every function in this file was defined as `snake_case_` with duplicated
# parameter names (a SyntaxError); names restored from the intra-module call sites.
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt` in `filename`, plus the
    start/end line indices and the full line list (for later rewriting)."""
    with open(filename, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines on both ends of the extracted region.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a CamelCase identifier into its words."""
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''', identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a field of `width` characters (emoji count as width 2)."""
    text_length = 2 if text == '''✅''' or text == '''❌''' else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Build the markdown support table (slow/fast tokenizer, PT/TF/Flax backends) from
    the auto-module mappings."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('''Config''', '''''') for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('''Tokenizer'''):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('''TokenizerFast'''):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''''''.join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = '''|''' + '''|'''.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '''|\n'''
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths]) + "|\n"

    check = {True: '''✅''', False: '''❌'''}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Compare the model table in `index.md` against the freshly generated one; rewrite
    it when `overwrite` is True, otherwise raise on a mismatch."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, '''index.md'''),
        start_prompt='''<!--This table is updated automatically from the auto modules''',
        end_prompt='''<!-- End table-->''',
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, '''index.md'''), '''w''', encoding='''utf-8''', newline='''\n''') as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.'''
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
93
"""IndicGLUE metric: accuracy, F1 or precision@10 depending on the configured subset."""
import numpy as np
from scipy.spatial.distance import cdist
# FIX: `fa_score` does not exist in sklearn.metrics (ImportError); the only F-score
# used below is the binary F1.
from sklearn.metrics import f1_score

import datasets


# FIX: these three module strings were all assigned to the single name
# `__lowerCAmelCase`, leaving `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION`
# (read by the decorator and `_info` below) undefined.
_CITATION = '\\n    @inproceedings{kakwani2020indicnlpsuite,\n    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n    year={2020},\n    booktitle={Findings of EMNLP},\n}\n'

_DESCRIPTION = '\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'

_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'


# FIX: the three helpers were all defined as `_UpperCamelCase` (each shadowing the
# previous); names restored from their call sites in `_compute` below.
def simple_accuracy(preds, labels):
    """Fraction of predictions equal to their references."""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    """Accuracy together with binary F1."""
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval by cosine distance.

    The gold alignment is assumed to be the identity (row i matches row i).
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, '''cosine''')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowercase(datasets.Metric):
    '''simple docstring'''

    # FIX: both methods were named `__magic_name__`, so the second definition shadowed
    # the first; `datasets.Metric` dispatches to `_info` and `_compute`.
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]'''
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''')
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''')),
                    '''references''': datasets.Value('''int64''')
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''')),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]'''
            )
9
0
"""Fast (byte-level BPE) tokenizer for Blenderbot-small."""
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

# FIX: the obfuscated source assigned the logger and all three constant dicts to the
# single name `lowercase`, so `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` (read in the class body) were undefined.
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/blenderbot_small-90M''': 512,
}


# FIX: the base class was the undefined name `snake_case_`; `PreTrainedTokenizerFast`
# is the only tokenizer base imported above.
class UpperCAmelCase_(PreTrainedTokenizerFast):
    '''simple docstring'''

    # FIX: all four class attributes were assigned to the single name `A` (each
    # shadowing the previous); these are the attribute names the
    # PreTrainedTokenizerFast machinery reads — confirm against the base-class API.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        # FIX: all parameters shared one name `_SCREAMING_SNAKE_CASE` (a SyntaxError);
        # restored distinct names matching how the values are consumed below.
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap sequence(s) with special tokens:
        `<bos> A <eos>` or `<bos> A <eos> <eos> B <eos>`."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zeros token-type-id list sized like the full special-token
        layout (Blenderbot does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
352
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A : Any = MobileBertTokenizer A : Optional[int] = MobileBertTokenizerFast A : Any = True A : Optional[int] = True A : List[str] = filter_non_english A : Any = 'google/mobilebert-uncased' def _lowerCAmelCase ( self ) -> List[str]: super().setUp() snake_case_ : List[Any] = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] snake_case_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) snake_case_ : Dict = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Dict: snake_case_ : int = "UNwant\u00E9d,running" snake_case_ : Dict = "unwanted, running" return input_text, output_text def _lowerCAmelCase ( self ) -> Dict: snake_case_ : int = self.tokenizer_class(self.vocab_file ) snake_case_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_SCREAMING_SNAKE_CASE , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [9, 6, 7, 12, 10, 11] ) def _lowerCAmelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return snake_case_ : List[str] = self.get_tokenizer() snake_case_ : str = 
self.get_rust_tokenizer() snake_case_ : str = "UNwant\u00E9d,running" snake_case_ : int = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) snake_case_ : Tuple = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ : int = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ : Union[str, Any] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ : Optional[Any] = self.get_rust_tokenizer() snake_case_ : Any = tokenizer.encode(_SCREAMING_SNAKE_CASE ) snake_case_ : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # With lower casing snake_case_ : Optional[Any] = self.get_tokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ) snake_case_ : Dict = self.get_rust_tokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ) snake_case_ : int = "UNwant\u00E9d,running" snake_case_ : Optional[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) snake_case_ : int = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ : List[str] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ : int = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ : Any = self.get_rust_tokenizer() snake_case_ : str = tokenizer.encode(_SCREAMING_SNAKE_CASE ) snake_case_ : List[Any] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self ) -> str: snake_case_ : Any = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _lowerCAmelCase ( self ) 
-> Union[str, Any]: snake_case_ : Optional[int] = BasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _lowerCAmelCase ( self ) -> Tuple: snake_case_ : Optional[Any] = BasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _lowerCAmelCase ( self ) -> Optional[Any]: snake_case_ : Dict = BasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _lowerCAmelCase ( self ) -> Union[str, Any]: snake_case_ : Union[str, Any] = BasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _lowerCAmelCase ( self ) -> str: snake_case_ : Dict = BasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _lowerCAmelCase ( self ) -> Union[str, Any]: snake_case_ : Any = BasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _lowerCAmelCase ( self ) -> Any: snake_case_ : Dict = BasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _lowerCAmelCase ( self ) -> List[Any]: snake_case_ : Any = BasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _lowerCAmelCase ( self ) -> Dict: snake_case_ : Tuple = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] snake_case_ : Optional[int] = {} for i, token in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ : Optional[Any] = i snake_case_ : List[Any] = WordpieceTokenizer(vocab=_SCREAMING_SNAKE_CASE , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _lowerCAmelCase ( self ) -> Optional[Any]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _lowerCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _lowerCAmelCase ( self ) -> int: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _lowerCAmelCase ( self ) -> Optional[Any]: snake_case_ : Optional[int] = self.get_tokenizer() snake_case_ : Any = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def _lowerCAmelCase ( self ) -> Optional[int]: snake_case_ : Union[str, Any] = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) snake_case_ : int = tokenizer.encode("sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE ) snake_case_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _lowerCAmelCase ( self ) -> Optional[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) snake_case_ : Dict = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' snake_case_ : Any = tokenizer_r.encode_plus( _SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , ) snake_case_ : List[Any] = tokenizer_r.do_lower_case if hasattr(_SCREAMING_SNAKE_CASE , 
"do_lower_case" ) else False snake_case_ : Optional[int] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _lowerCAmelCase ( self ) -> Tuple: snake_case_ : Union[str, Any] = ["的", "人", "有"] snake_case_ : int = "".join(_SCREAMING_SNAKE_CASE ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : Tuple = True snake_case_ : Union[str, Any] = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) snake_case_ : Optional[int] = tokenizer_p.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ : Tuple = tokenizer_r.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) snake_case_ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ : Tuple = 
False snake_case_ : str = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) snake_case_ : List[str] = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) snake_case_ : Optional[int] = tokenizer_r.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ : Tuple = tokenizer_p.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) snake_case_ : List[str] = tokenizer_r.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) snake_case_ : Any = tokenizer_p.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) # it is expected that only the first Chinese character is not preceded by "##". snake_case_ : Union[str, Any] = [ f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_SCREAMING_SNAKE_CASE ) ] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
36
0
import os def _snake_case ( ): """simple docstring""" with open(os.path.dirname(lowerCAmelCase ) + "/p022_names.txt" ) as file: SCREAMING_SNAKE_CASE_ : List[str] = str(file.readlines()[0] ) SCREAMING_SNAKE_CASE_ : Any = names.replace("\"" , "" ).split("," ) names.sort() SCREAMING_SNAKE_CASE_ : Optional[int] = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = 0 for i, name in enumerate(lowerCAmelCase ): for letter in name: name_score += ord(lowerCAmelCase ) - 6_4 total_score += (i + 1) * name_score SCREAMING_SNAKE_CASE_ : Tuple = 0 return total_score if __name__ == "__main__": print(solution())
18
"""simple docstring""" import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _lowerCamelCase ( _lowercase , unittest.TestCase ): UpperCAmelCase_ = VideoToVideoSDPipeline UpperCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} UpperCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} UpperCAmelCase_ = PipelineTesterMixin.required_optional_params - {"latents"} UpperCAmelCase_ = False # No `output_type`. 
UpperCAmelCase_ = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def snake_case_ (self ) -> List[Any]: torch.manual_seed(0 ) UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) UpperCamelCase = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) UpperCamelCase = CLIPTextModel(__a ) UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCamelCase = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def snake_case_ (self , __a , __a=0 ) -> Dict: # 3 frames UpperCamelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__a ) ).to(__a ) if str(__a ).startswith("mps" ): UpperCamelCase = torch.manual_seed(__a ) else: UpperCamelCase = torch.Generator(device=__a ).manual_seed(__a ) UpperCamelCase = { "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, 
"guidance_scale": 6.0, "output_type": "pt", } return inputs def snake_case_ (self ) -> List[Any]: UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = VideoToVideoSDPipeline(**__a ) UpperCamelCase = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCamelCase = self.get_dummy_inputs(__a ) UpperCamelCase = "np" UpperCamelCase = sd_pipe(**__a ).frames UpperCamelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) UpperCamelCase = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def snake_case_ (self ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a , expected_max_diff=5e-3 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def snake_case_ (self ) -> List[Any]: pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def snake_case_ (self ) -> Optional[Any]: pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." 
) def snake_case_ (self ) -> Dict: pass def snake_case_ (self ) -> Optional[int]: return super().test_progress_bar() @slow @skip_mps class _lowerCamelCase ( unittest.TestCase ): def snake_case_ (self ) -> List[str]: UpperCamelCase = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames UpperCamelCase = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCamelCase = torch.randn((1, 10, 3, 10_24, 5_76) , generator=__a ) UpperCamelCase = video.to("cuda" ) UpperCamelCase = "Spiderman is surfing" UpperCamelCase = pipe(__a , video=__a , generator=__a , num_inference_steps=3 , output_type="pt" ).frames UpperCamelCase = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
153
0
"""Scrape worldometers.info for current worldwide COVID-19 statistics."""
import requests
from bs4 import BeautifulSoup  # was `from bsa import BeautifulSoup` — mangled module name


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return ``{statistic title: value}`` scraped from the worldometers page.

    Titles come from the ``h1`` and ``span.panel-title`` elements; values from
    the matching ``div.maincounter-number`` / ``div.number-table-main`` elements,
    paired positionally with ``zip``. Fixes over the previous revision: the
    function was named ``__a`` while the ``__main__`` guard called
    ``world_covidaa_stats()``, and the BeautifulSoup import pointed at a
    nonexistent ``bsa`` module.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f"""{key}\n{value}\n""")
361
"""Compute terms of Sylvester's sequence: a(1) = 2, a(n) = a(n-1)^2 - a(n-1) + 1."""


def sylvester(number: int) -> int:
    """Return the *number*-th term (1-indexed) of Sylvester's sequence.

    Fixes over the previous revision: the function was named ``__a`` while the
    recursive call and the ``__main__`` guard both referenced ``sylvester``,
    raising ``NameError`` for any input > 1.

    Raises:
        ValueError: if ``number`` < 1.
    """
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""

    if number == 1:
        return 2
    if number < 1:
        raise ValueError(f"""The input value of [n={number}] has to be > 0""")

    # a(n) = a(n-1) * (a(n-1) - 1) + 1
    previous = sylvester(number - 1)
    return previous * (previous - 1) + 1


if __name__ == "__main__":
    print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
23
0
"""Find the first partition number whose proportion of perfect partitions
drops below a given threshold (Project Euler style)."""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if *positive_integer* is a "perfect" partition number.

    A number m is perfect here when log2(sqrt(4m + 1) / 2 + 1 / 2) is an
    integer, i.e. m = 2^k * (2^k - 1) for some k. Fixes over the previous
    revision: the exponent was compared against ``int(positive_integer)``
    instead of ``int(exponent)``.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition candidate at which the running proportion of
    perfect partitions falls strictly below *max_proportion*.

    Candidates are (k^2 - 1) / 4 for k = 3, 4, ...; only integer candidates
    count as partitions. Fixes over the previous revision: the integer test,
    the cast, and the return value all used ``int(max_proportion)`` instead of
    the partition candidate, so no partition was ever counted.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return partition_candidate
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
263
"""Falcon model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class _UpperCAmelCase(PretrainedConfig):
    """Configuration for Falcon causal-LM models.

    Fixes over the previous revision: the base class was the undefined name
    ``a`` (should be ``PretrainedConfig``), the two class attributes were both
    named ``a__`` (second shadowed the first), and the two properties shared
    one placeholder name (restored as ``head_dim`` and ``rotary``).
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        """Store architecture hyper-parameters; defaults match falcon-7b."""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Falls back to multi-head attention when num_kv_heads is not given.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Per-head dimensionality of the attention projections."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """True when rotary embeddings are used (i.e. ALiBi is disabled)."""
        return not self.alibi
263
1
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : int = RobertaTokenizer _lowerCamelCase : List[Any] = RobertaTokenizerFast _lowerCamelCase : List[Any] = True _lowerCamelCase : int = {'cls_token': '<s>'} def __A ( self : Any ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] A_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) ) A_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] A_ = {"unk_token": "<unk>"} A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase ) ) def __A ( self : Tuple , **UpperCAmelCase : Dict ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Dict , **UpperCAmelCase : int ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Dict , UpperCAmelCase : str ): A_ = "lower newer" A_ = "lower newer" return input_text, output_text def __A ( self : Optional[int] ): A_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A_ = 
"lower newer" A_ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] A_ = tokenizer.tokenize(UpperCAmelCase ) # , add_prefix_space=True) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokens + [tokenizer.unk_token] A_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase ) def __A ( self : str ): A_ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=UpperCAmelCase ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def __A ( self : Optional[Any] ): A_ = self.tokenizer_class.from_pretrained("roberta-base" ) A_ = tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.encode( "sequence builders" , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase ) A_ = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __A ( self : int ): A_ = self.get_tokenizer() A_ = "Encode this sequence." 
A_ = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(UpperCAmelCase , UpperCAmelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(UpperCAmelCase , UpperCAmelCase ) # Testing spaces after special tokens A_ = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase )} ) # mask token has a left space A_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase ) A_ = "Encode <mask> sequence" A_ = "Encode <mask>sequence" A_ = tokenizer.encode(UpperCAmelCase ) A_ = encoded.index(UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase ) A_ = encoded.index(UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[str] ): pass def __A ( self : Dict ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = "A, <mask> AllenNLP sentence." 
A_ = tokenizer_r.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) A_ = tokenizer_p.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) A_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) A_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( UpperCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def __A ( self : Dict ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A_ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , UpperCAmelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , UpperCAmelCase ) self.assertEqual(post_processor_state["trim_offsets"] , UpperCAmelCase ) def __A ( self : int ): # Test which aims to verify that the offsets are well adapted to the argument 
`add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name` A_ = f'''{text_of_1_token} {text_of_1_token}''' A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase ) + 1, len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase ) + 1, len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase ), len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) 
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase ), len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = f''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ) + 1, 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ), 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] 
, (1 + len(UpperCAmelCase ), 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
329
"""Convert original PoolFormer checkpoints (.pth) to HuggingFace format.

Fixes applied: the four helper functions had all been renamed to a single
colliding identifier while every call site still used the original names
(`replace_key_with_offset`, `rename_keys`, `prepare_img`,
`convert_poolformer_checkpoint`), and the `logger` / `parser` / `args`
objects were assigned to throwaway names — the script could not run at all.
"""

import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename `key`, shifting its block index down by `offset` (the number of
    patch-embedding layers counted so far) and swapping `original_name` for
    `new_name`.
    """
    # The block index sits two dots before the first component of original_name.
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )
    return key


def rename_keys(state_dict):
    """Map original PoolFormer state-dict keys to HuggingFace naming."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Download the standard COCO cats test image used to sanity-check logits."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/rename weights from the original checkpoint into a HuggingFace
    PoolFormer, verify the forward pass against reference logits, and save
    model + image processor to `pytorch_dump_folder_path`.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
329
1
'''Tests for image-processor hub interaction: offline/500 fallback behaviour,
deprecated URL loading, subfolder resolution, and push-to-hub round trips
against the staging endpoint.'''

import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test

sys.path.append(str(Path(__file__).parent.parent / '''utils'''))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402

lowerCAmelCase__ = get_tests_dir('''fixtures''')


class lowercase_ (unittest.TestCase ):
    """Read-only hub loading behaviour (network mocked or public repos)."""

    def SCREAMING_SNAKE_CASE ( self ):
        # A mock response for an HTTP head request to emulate server down
        __lowercase = mock.Mock()
        # NOTE(review): obfuscation collapsed every assignment target to
        # `__lowercase`, so each statement overwrites the previous one and the
        # later reference `lowercase__` is undefined — the upstream test
        # configured one mock (status_code=500, headers={}, raise_for_status
        # side effect, json {}); confirm against the original before relying
        # on this block.
        __lowercase = 5_0_0
        __lowercase = {}
        __lowercase = HTTPError
        __lowercase = {}
        # Download this model to make sure it's in the cache.
        __lowercase = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' ,return_value=lowercase__ ) as mock_head:
            __lowercase = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def SCREAMING_SNAKE_CASE ( self ):
        # This test is for deprecated behavior and can be removed in v5
        __lowercase = ViTImageProcessor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )

    def SCREAMING_SNAKE_CASE ( self ):
        with self.assertRaises(lowercase__ ):
            # config is in subfolder, the following should not work without specifying the subfolder
            __lowercase = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )

        __lowercase = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/stable-diffusion-all-variants''' ,subfolder='''feature_extractor''' )
        self.assertIsNotNone(lowercase__ )


@is_staging_test
class lowercase_ (unittest.TestCase ):
    """Push-to-hub round trips (requires a staging TOKEN; repos are cleaned up)."""

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls ):
        # Persist the staging token so push_to_hub/delete_repo below authenticate.
        # NOTE(review): originally `cls._token = TOKEN`; obfuscation turned the
        # target into a local, so `cls._token` reads below would fail — verify
        # against the upstream test.
        __lowercase = TOKEN
        HfFolder.save_token(lowercase__ )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls ):
        # Best-effort cleanup of every repo the tests may have created.
        try:
            delete_repo(token=cls._token ,repo_id='''test-image-processor''' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token ,repo_id='''valid_org/test-image-processor-org''' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token ,repo_id='''test-dynamic-image-processor''' )
        except HTTPError:
            pass

    def SCREAMING_SNAKE_CASE ( self ):
        # Push a processor to a user repo, reload it, and compare attributes.
        __lowercase = ViTImageProcessor.from_pretrained(lowercase__ )
        image_processor.push_to_hub('''test-image-processor''' ,use_auth_token=self._token )

        __lowercase = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowercase__ ,getattr(lowercase__ ,lowercase__ ) )

        # Reset repo
        delete_repo(token=self._token ,repo_id='''test-image-processor''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                lowercase__ ,repo_id='''test-image-processor''' ,push_to_hub=lowercase__ ,use_auth_token=self._token )

        __lowercase = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowercase__ ,getattr(lowercase__ ,lowercase__ ) )

    def SCREAMING_SNAKE_CASE ( self ):
        # Same round trip, but targeting an organization repo.
        __lowercase = ViTImageProcessor.from_pretrained(lowercase__ )
        image_processor.push_to_hub('''valid_org/test-image-processor''' ,use_auth_token=self._token )

        __lowercase = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowercase__ ,getattr(lowercase__ ,lowercase__ ) )

        # Reset repo
        delete_repo(token=self._token ,repo_id='''valid_org/test-image-processor''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                lowercase__ ,repo_id='''valid_org/test-image-processor-org''' ,push_to_hub=lowercase__ ,use_auth_token=self._token )

        __lowercase = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowercase__ ,getattr(lowercase__ ,lowercase__ ) )

    def SCREAMING_SNAKE_CASE ( self ):
        # Dynamic (trust_remote_code) processor: push, check auto_map, reload.
        CustomImageProcessor.register_for_auto_class()
        __lowercase = CustomImageProcessor.from_pretrained(lowercase__ )
        image_processor.push_to_hub('''test-dynamic-image-processor''' ,use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map ,{'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} ,)

        __lowercase = AutoImageProcessor.from_pretrained(
            F"{USER}/test-dynamic-image-processor" ,trust_remote_code=lowercase__ )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ ,'''CustomImageProcessor''' )
104
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A_ : List[str] = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = ["YolosFeatureExtractor"] A_ : Optional[int] = ["YolosImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : str = [ "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST", "YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
165
0
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : Optional[int] = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys _lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
360
# Integration test for FlaxXLMRobertaModel: loads the pretrained checkpoint and
# checks the forward-pass output shape plus a slice of the last hidden state
# against reference values.
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow

if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase ( unittest.TestCase ):
    @slow
    def __snake_case( self ):
        '''Download xlm-roberta-base and verify the forward pass numerically.'''
        # NOTE(review): every assignment target was obfuscated to the same name
        # (`SCREAMING_SNAKE_CASE`) and arguments to `_UpperCamelCase`, so the
        # later references (`tokenizer`, `model`, `output`) are undefined here —
        # compare against the upstream test before trusting this block.
        SCREAMING_SNAKE_CASE = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
        SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("xlm-roberta-base" )
        SCREAMING_SNAKE_CASE = "The dog is cute and lives in the garden house"
        SCREAMING_SNAKE_CASE = jnp.array([tokenizer.encode(_UpperCamelCase )] )
        SCREAMING_SNAKE_CASE = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        SCREAMING_SNAKE_CASE = jnp.array(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        SCREAMING_SNAKE_CASE = model(_UpperCamelCase )["last_hidden_state"]
        self.assertEqual(output.shape , _UpperCamelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , _UpperCamelCase , atol=1e-3 ) )
206
0
"""simple docstring""" # Algorithm for the pigeonhole sorting def lowercase (SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: SCREAMING_SNAKE_CASE = min(SCREAMING_SNAKE_CASE_ ) # min() finds the minimum value SCREAMING_SNAKE_CASE = max(SCREAMING_SNAKE_CASE_ ) # max() finds the maximum value SCREAMING_SNAKE_CASE = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size SCREAMING_SNAKE_CASE = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. SCREAMING_SNAKE_CASE = 0 for count in range(SCREAMING_SNAKE_CASE_ ): while holes[count] > 0: holes[count] -= 1 SCREAMING_SNAKE_CASE = count + min_val i += 1 def lowercase () -> Optional[Any]: SCREAMING_SNAKE_CASE = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(SCREAMING_SNAKE_CASE_ ) print('Sorted order is:' , ' '.join(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": main()
113
"""simple docstring""" __UpperCamelCase = frozenset( [ '''prompt''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', ] ) __UpperCamelCase = frozenset(['''prompt''', '''negative_prompt''']) __UpperCamelCase = frozenset([]) __UpperCamelCase = frozenset(['''image''']) __UpperCamelCase = frozenset( [ '''image''', '''height''', '''width''', '''guidance_scale''', ] ) __UpperCamelCase = frozenset(['''image''']) __UpperCamelCase = frozenset( [ '''prompt''', '''image''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', ] ) __UpperCamelCase = frozenset(['''prompt''', '''image''', '''negative_prompt''']) __UpperCamelCase = frozenset( [ # Text guided image variation with an image mask '''prompt''', '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', ] ) __UpperCamelCase = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt''']) __UpperCamelCase = frozenset( [ # image variation with an image mask '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', ] ) __UpperCamelCase = frozenset(['''image''', '''mask_image''']) __UpperCamelCase = frozenset( [ '''example_image''', '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', ] ) __UpperCamelCase = frozenset(['''example_image''', '''image''', '''mask_image''']) __UpperCamelCase = frozenset(['''class_labels''']) __UpperCamelCase = frozenset(['''class_labels''']) __UpperCamelCase = frozenset(['''batch_size''']) __UpperCamelCase = frozenset([]) __UpperCamelCase = frozenset(['''batch_size''']) __UpperCamelCase = frozenset([]) __UpperCamelCase = frozenset( [ '''prompt''', '''audio_length_in_s''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', 
'''cross_attention_kwargs''', ] ) __UpperCamelCase = frozenset(['''prompt''', '''negative_prompt''']) __UpperCamelCase = frozenset(['''input_tokens''']) __UpperCamelCase = frozenset(['''input_tokens'''])
113
1
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase__ ( snake_case__, unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = LongformerTokenizer _UpperCAmelCase :Optional[Any] = True _UpperCAmelCase :Union[str, Any] = LongformerTokenizerFast _UpperCAmelCase :Union[str, Any] = True def UpperCAmelCase__ ( self : Union[str, Any] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase_ : Optional[int] =[ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowerCamelCase_ : Union[str, Any] =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowerCamelCase_ : List[str] =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowerCamelCase_ : Dict ={"unk_token": "<unk>"} lowerCamelCase_ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase_ : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def UpperCAmelCase__ ( self : Union[str, Any] , **snake_case__ : Optional[Any] ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase__ ( self : Dict , **snake_case__ : List[str] ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , 
**snake_case__ ) def UpperCAmelCase__ ( self : int , snake_case__ : Optional[int] ): lowerCamelCase_ : Any ="lower newer" lowerCamelCase_ : Union[str, Any] ="lower newer" return input_text, output_text def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ : Dict =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCamelCase_ : Tuple ="lower newer" lowerCamelCase_ : str =["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] lowerCamelCase_ : Optional[int] =tokenizer.tokenize(snake_case__ ) # , add_prefix_space=True) self.assertListEqual(snake_case__ , snake_case__ ) lowerCamelCase_ : Dict =tokens + [tokenizer.unk_token] lowerCamelCase_ : str =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) def UpperCAmelCase__ ( self : Optional[int] ): lowerCamelCase_ : List[Any] =self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=snake_case__ ) , [0, 3_1414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=snake_case__ ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , ) @slow def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : str =self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) lowerCamelCase_ : Dict =tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) lowerCamelCase_ : Any =tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) lowerCamelCase_ : str =tokenizer.encode( "sequence builders" , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ ) lowerCamelCase_ : Any =tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ ) lowerCamelCase_ : Union[str, Any] =tokenizer.build_inputs_with_special_tokens(snake_case__ ) lowerCamelCase_ : str =tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ : Optional[int] =self.get_tokenizer() lowerCamelCase_ : int ="Encode this sequence." 
lowerCamelCase_ : Any =tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments lowerCamelCase_ : Union[str, Any] =tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ ) lowerCamelCase_ : Dict =tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(snake_case__ , snake_case__ ) lowerCamelCase_ : List[Any] =tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ ) lowerCamelCase_ : int =tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(snake_case__ , snake_case__ ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) lowerCamelCase_ : Dict =tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) lowerCamelCase_ : int =tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(snake_case__ , snake_case__ ) # Testing spaces after special tokens lowerCamelCase_ : Dict ="<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ )} ) # mask token has a left space lowerCamelCase_ : Dict =tokenizer.convert_tokens_to_ids(snake_case__ ) lowerCamelCase_ : Tuple ="Encode <mask> sequence" lowerCamelCase_ : Union[str, Any] ="Encode <mask>sequence" lowerCamelCase_ : str =tokenizer.encode(snake_case__ ) lowerCamelCase_ : Tuple =encoded.index(snake_case__ ) lowerCamelCase_ : Dict =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(snake_case__ , snake_case__ ) lowerCamelCase_ : List[Any] =tokenizer.encode(snake_case__ ) lowerCamelCase_ : Optional[Any] =encoded.index(snake_case__ ) lowerCamelCase_ : Union[str, Any] =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(snake_case__ , snake_case__ ) def UpperCAmelCase__ ( self : Dict ): pass def UpperCAmelCase__ ( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): 
lowerCamelCase_ : Dict =self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) lowerCamelCase_ : int =self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) lowerCamelCase_ : Dict ="A, <mask> AllenNLP sentence." lowerCamelCase_ : List[str] =tokenizer_r.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ ) lowerCamelCase_ : Optional[Any] =tokenizer_p.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) lowerCamelCase_ : Optional[Any] =tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) lowerCamelCase_ : str =tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( snake_case__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( snake_case__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def UpperCAmelCase__ ( self : int ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): lowerCamelCase_ : List[Any] =self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ ) lowerCamelCase_ : List[str] =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowerCamelCase_ : List[str] 
=json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , snake_case__ ) self.assertEqual(post_processor_state["add_prefix_space"] , snake_case__ ) self.assertEqual(post_processor_state["trim_offsets"] , snake_case__ ) def UpperCAmelCase__ ( self : int ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ : Any ="hello" # `hello` is a token in the vocabulary of `pretrained_name` lowerCamelCase_ : Any =F"""{text_of_1_token} {text_of_1_token}""" lowerCamelCase_ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained( snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ ) lowerCamelCase_ : Optional[int] =tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) , ) lowerCamelCase_ : Any =self.rust_tokenizer_class.from_pretrained( snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ ) lowerCamelCase_ : Any =tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) , ) lowerCamelCase_ : List[Any] =self.rust_tokenizer_class.from_pretrained( snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ ) lowerCamelCase_ : str =tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , 
add_special_tokens=snake_case__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(snake_case__ ), len(snake_case__ ) + 1 + len(snake_case__ )) , ) lowerCamelCase_ : Optional[Any] =self.rust_tokenizer_class.from_pretrained( snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ ) lowerCamelCase_ : List[Any] =tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(snake_case__ ), len(snake_case__ ) + 1 + len(snake_case__ )) , ) lowerCamelCase_ : Any =F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowerCamelCase_ : Tuple =self.rust_tokenizer_class.from_pretrained( snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ ) lowerCamelCase_ : List[Any] =tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(snake_case__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) , ) lowerCamelCase_ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained( snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ ) lowerCamelCase_ : Union[str, Any] =tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , 
add_special_tokens=snake_case__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(snake_case__ ), 1 + len(snake_case__ ) + 1 + len(snake_case__ )) , ) lowerCamelCase_ : List[str] =self.rust_tokenizer_class.from_pretrained( snake_case__ , use_fast=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ ) lowerCamelCase_ : Optional[int] =tokenizer_r(snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(snake_case__ ), 1 + len(snake_case__ ) + 1 + len(snake_case__ )) , )
356
"""simple docstring""" def _snake_case ( lowerCamelCase__ : int ) -> int: if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): raise TypeError("only integers accepted as input" ) else: lowerCamelCase_ : str =str(abs(lowerCamelCase__ ) ) lowerCamelCase_ : Tuple =[list(lowerCamelCase__ ) for char in range(len(lowerCamelCase__ ) )] for index in range(len(lowerCamelCase__ ) ): num_transpositions[index].pop(lowerCamelCase__ ) return max( int("".join(list(lowerCamelCase__ ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('doctest').testmod()
209
0
"""Centralized logging helpers (transformers-style): one library root logger
with a configurable verbosity, plus a tqdm wrapper that can be disabled."""
import functools
import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib


# Guards lazy, one-time configuration of the library root logger.
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """Return the default log level, honoring TRANSFORMERS_VERBOSITY if set
    to one of the keys of `log_levels`."""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    # e.g. "transformers" for a module named "transformers.utils.logging".
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    """Attach a stderr handler to the library root logger exactly once."""
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """Undo `_configure_library_root_logger`."""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    """Return the mapping from level name to logging level."""
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the given name (library root logger by default),
    making sure the library root logger is configured first."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the effective level of the library root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to `INFO`."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to `WARNING`."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to `DEBUG`."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to `ERROR`."""
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Detach the library's default stderr handler."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Re-attach the library's default stderr handler."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a custom handler to the library root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove a previously added handler from the library root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Stop log records from propagating to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Allow log records to propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Use an explicit "[LEVEL|file:line] time >> message" format on every
    handler currently attached to the library root logger."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset every attached handler to the default (no) formatter."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like `Logger.warning`, but a no-op when the
    TRANSFORMERS_NO_ADVISORY_WARNINGS env var is set (truthy)."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Like `Logger.warning`, but emit each distinct (args, kwargs) only once
    per process (lru_cache keys on the call arguments)."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm that swallows every call and just iterates its argument."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return a no-op function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    """Factory that yields real tqdm bars when enabled, EmptyTqdm otherwise."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return True when tqdm progress bars are globally enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
252
"""Exact-match metric: percentage of predictions that equal their reference."""
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    """Computes the exact-match rate of predictions against references."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": pct} after the optional normalizations."""
        # Strip ignored patterns *before* case/punctuation/number handling.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
252
1
"""simple docstring""" import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=None ) -> Union[str, Any]: """simple docstring""" if "." in tensor_name: snake_case = tensor_name.split('.' ) for split in splits[:-1]: snake_case = getattr(_UpperCamelCase , _UpperCamelCase ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case = new_module snake_case = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) snake_case = tensor_name in module._buffers snake_case = getattr(_UpperCamelCase , _UpperCamelCase ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) snake_case = False snake_case = False if is_buffer or not is_bitsandbytes_available(): snake_case = False snake_case = False else: snake_case = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) snake_case = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: snake_case = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: snake_case = 
old_value.to(_UpperCamelCase ) elif isinstance(_UpperCamelCase , torch.Tensor ): snake_case = value.to('cpu' ) if value.dtype == torch.inta: snake_case = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: snake_case = torch.tensor(_UpperCamelCase , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , _UpperCamelCase ) and fpaa_statistics is None: snake_case = new_value.T snake_case = old_value.__dict__ if is_abit: snake_case = bnb.nn.IntaParams(_UpperCamelCase , requires_grad=_UpperCamelCase , **_UpperCamelCase ).to(_UpperCamelCase ) elif is_abit: snake_case = bnb.nn.Paramsabit(_UpperCamelCase , requires_grad=_UpperCamelCase , **_UpperCamelCase ).to(_UpperCamelCase ) snake_case = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(_UpperCamelCase ) ) else: if value is None: snake_case = old_value.to(_UpperCamelCase ) elif isinstance(_UpperCamelCase , torch.Tensor ): snake_case = value.to(_UpperCamelCase ) else: snake_case = torch.tensor(_UpperCamelCase , device=_UpperCamelCase ) if is_buffer: snake_case = new_value else: snake_case = nn.Parameter(_UpperCamelCase , requires_grad=old_value.requires_grad ) snake_case = new_value def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Optional[int]=False ) -> Optional[int]: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: 
snake_case = [] current_key_name.append(_UpperCamelCase ) if (isinstance(_UpperCamelCase , nn.Linear ) or isinstance(_UpperCamelCase , _UpperCamelCase )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(_UpperCamelCase ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(_UpperCamelCase , _UpperCamelCase ): snake_case ,snake_case = module.weight.shape else: snake_case = module.in_features snake_case = module.out_features if quantization_config.quantization_method() == "llm_int8": snake_case = bnb.nn.LinearabitLt( _UpperCamelCase , _UpperCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) snake_case = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: snake_case = bnb.nn.Linearabit( _UpperCamelCase , _UpperCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) snake_case = True # Store the module class in case we need to transpose the weight later snake_case = type(_UpperCamelCase ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(_UpperCamelCase ) if len(list(module.children() ) ) > 0: snake_case ,snake_case = _replace_with_bnb_linear( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_been_replaced=_UpperCamelCase , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : str=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Union[str, Any]=None ) -> Union[str, Any]: """simple docstring""" snake_case = ['lm_head'] if 
modules_to_not_convert is None else modules_to_not_convert snake_case ,snake_case = _replace_with_bnb_linear( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def lowerCAmelCase__ ( *_UpperCamelCase : Any , **_UpperCamelCase : int ) -> str: """simple docstring""" warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , _UpperCamelCase , ) return replace_with_bnb_linear(*_UpperCamelCase , **_UpperCamelCase ) def lowerCAmelCase__ ( *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Dict ) -> Tuple: """simple docstring""" warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , _UpperCamelCase , ) return set_module_quantized_tensor_to_device(*_UpperCamelCase , **_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> Dict: """simple docstring""" snake_case = deepcopy(_UpperCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() snake_case = find_tied_parameters(_UpperCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(_UpperCamelCase , _UpperCamelCase ): snake_case = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case = sum(_UpperCamelCase , [] ) snake_case = len(_UpperCamelCase ) > 0 # Check if it is a base model snake_case = not hasattr(_UpperCamelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case = list(model.named_children() ) snake_case = [list_modules[-1][0]] # add last module together with tied weights snake_case = set(_UpperCamelCase ) - set(_UpperCamelCase ) snake_case = list(set(_UpperCamelCase ) ) + list(_UpperCamelCase ) # remove ".weight" from the keys snake_case = ['.weight', '.bias'] snake_case = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case = name.replace(_UpperCamelCase , '' ) filtered_module_names.append(_UpperCamelCase ) return filtered_module_names
149
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" snake_case = torch.nn.Linear(10 , 10 ) snake_case = torch.optim.SGD(model.parameters() , 0.1 ) snake_case = Accelerator() snake_case = accelerator.prepare(lowerCAmelCase ) try: pickle.loads(pickle.dumps(lowerCAmelCase ) ) except Exception as e: self.fail(F"""Accelerated optimizer pickling failed with {e}""" ) AcceleratorState._reset_state()
149
1
"""Next-greater-element: for each item, the first larger item to its right (-1 if none)."""
from __future__ import annotations

arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2) reference implementation using explicit index scanning.

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version but iterating values directly (still O(n^2)).

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack implementation, scanning right to left.

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # Pop everything not greater than the current element; the stack
            # stays strictly decreasing from bottom to top.
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
253
"""SentencePiece tokenizer for BertGeneration (bert_for_seq_generation)."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """
    Construct a BertGeneration tokenizer backed by a SentencePiece model.

    Args:
        vocab_file: path to the SentencePiece `spiece.model` file.
        bos_token/eos_token/unk_token/pad_token/sep_token: special tokens.
        sp_model_kwargs: extra kwargs forwarded to `SentencePieceProcessor`.
    """

    # These class attributes are read by the PreTrainedTokenizer base class.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an id (int) to a token (str) using the SentencePiece vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
253
1
"""Image processor: shortest-edge resize, center crop, rescale, normalize."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class __a(BaseImageProcessor):
    """
    Image processor with optional shortest-edge resize (default 256),
    center crop (default 224x224), 1/255 rescale, and ImageNet-standard
    normalization.

    NOTE(review): the class name was machine-garbled; kept as-is to avoid
    breaking callers — restore the model's canonical processor name once known.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline; per-call arguments override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
359
"""ChrF(++) metric, wrapping the implementation already present in sacrebleu."""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets


# BUG FIX: all three module constants were bound to the same placeholder name
# (`__lowerCAmelCase`), while the class below references `_CITATION`,
# `_DESCRIPTION` and `_KWARGS_DESCRIPTION` — a NameError at import time.
# Their intended names are restored here.
_CITATION = """\
@inproceedings{popovic-2015-chrf,
  title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
  author = "Popovi{\'c}, Maja",
  booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
  month = sep,
  year = "2015",
  address = "Lisbon, Portugal",
  publisher = "Association for Computational Linguistics",
  url = "https://aclanthology.org/W15-3049",
  doi = "10.18653/v1/W15-3049",
  pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
  title = "chr{F}++: words helping character n-grams",
  author = "Popovi{\'c}, Maja",
  booktitle = "Proceedings of the Second Conference on Machine Translation",
  month = sep,
  year = "2017",
  address = "Copenhagen, Denmark",
  publisher = "Association for Computational Linguistics",
  url = "https://aclanthology.org/W17-4770",
  doi = "10.18653/v1/W17-4770",
  pages = "612--618",
}
@inproceedings{post-2018-call,
  title = "A Call for Clarity in Reporting {BLEU} Scores",
  author = "Post, Matt",
  booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
  month = oct,
  year = "2018",
  address = "Belgium, Brussels",
  publisher = "Association for Computational Linguistics",
  url = "https://www.aclweb.org/anthology/W18-6319",
  pages = "186--191",
}
"""

_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2,
        ...                        lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    """ChrF and ChrF++ as a `datasets` metric, backed by sacrebleu's `CHRF`."""

    def _info(self):
        # BUG FIX: both methods of this class carried the same placeholder name,
        # so the second definition shadowed the first and the `datasets.Metric`
        # framework (which dispatches to `_info` / `_compute`) found neither.
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'],
            reference_urls=[
                'https://github.com/m-popovic/chrF',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        """Score `predictions` against `references` with sacrebleu's CHRF.

        `references` is laid out [prediction][reference]; sacrebleu expects the
        transpose (one list per reference position), hence the rearrangement.
        """
        # BUG FIX: the original assigned len(references[0]) to a placeholder
        # variable but then read the undefined `references_per_prediction`.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            'score': output.score,
            'char_order': output.char_order,
            'word_order': output.word_order,
            'beta': output.beta,
        }
288
0