Dataset schema (each record below is one row; the integer cells following each
code string are that row's codestyle score, and the final integer is its label):

  code                      string  (length 81 to 54k)
  code_codestyle            int64   (range 0 to 721)
  style_context             string  (length 91 to 41.9k)
  style_context_codestyle   int64   (range 0 to 699)
  label                     int64   (0 or 1)
code:

def multiplicative_persistence(num: int) -> int:
    """Return how many steps it takes to reduce num to one digit by
    repeatedly multiplying its digits together."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many steps it takes to reduce num to one digit by
    repeatedly summing its digits."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
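A quick hand-checked sanity test of the two functions above (added for
illustration; the values are worked out by hand and are not part of the
dataset record):

assert multiplicative_persistence(217) == 2  # 217 -> 2*1*7 = 14 -> 1*4 = 4
assert additive_persistence(217) == 2        # 217 -> 2+1+7 = 10 -> 1+0 = 1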
code_codestyle: 49
style_context:

import operator as op


def solve(post_fix):
    """Evaluate a postfix expression, printing every stack operation as a table."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
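A hand-worked illustration of the evaluator (added; not from the original
file). The postfix expression 2 6 9 * + reduces to 2 + 6*9:

assert solve("2 6 9 * +".split(" ")) == 56  # pops 6 and 9, pushes 54, then 2 + 54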
style_context_codestyle: 49
label: 1
code:

# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
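A minimal numeric spot-check of the helpers above (added for illustration;
values verified by hand, assuming the restored names sigmoid_function and
cost_function):

import numpy as np

assert sigmoid_function(0) == 0.5  # 1 / (1 + e^0)
# a constant 0.5 prediction on one negative and one positive label costs log(2)
assert np.isclose(cost_function(np.array([0.5, 0.5]), np.array([0.0, 1.0])), np.log(2))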
code_codestyle: 49
style_context:

import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
style_context_codestyle: 49
label: 1
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 49
style_context:

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
style_context_codestyle: 49
label: 1
code:

from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
code_codestyle: 49
style_context:

import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                'token_type_ids': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                'attention_mask': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
style_context_codestyle: 49
label: 1
code:

from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
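A small traced example (added for illustration; not from the original record):
on [5, 1, 2, 3] the pivot 5 is discarded in favour of the longer run starting
at 1:

assert longest_subsequence([5, 1, 2, 3]) == [1, 2, 3]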
code_codestyle: 49
style_context:

import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)

EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
style_context_codestyle: 49
label: 1
code:

import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
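A brief usage sketch (added; the describe helper is made up for demonstration
and is not part of the dataset record), showing that PathLike covers the
common path representations:

from pathlib import Path

def describe(path: PathLike) -> str:
    # os.fspath handles str and os.PathLike values uniformly
    return f"path={os.fspath(path)!r}"

print(describe("data/train.csv"))
print(describe(Path("data") / "train.csv"))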
code_codestyle: 49
style_context:

import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
style_context_codestyle: 49
label: 1
code:

values = {
    0: "0", 1: "1", 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7",
    8: "8", 9: "9", 10: "a", 11: "b", 12: "c", 13: "d", 14: "e", 15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a base-10 integer (given as an int or a whole float) to its
    hexadecimal string representation."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
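Two hand-checked conversions (added for illustration; not part of the dataset
record):

assert decimal_to_hexadecimal(255) == "0xff"     # 255 = 15*16 + 15
assert decimal_to_hexadecimal(-256) == "-0x100"  # the sign is restored after conversion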
code_codestyle: 49
style_context:

import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-indexed line number of the base/exponent pair with the
    greatest value, comparing a**x via x * log10(a)."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
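The key trick above is comparing x * log10(a) instead of computing a**x
outright; the ordering is preserved because log10 is monotonic. A tiny
hand-checked illustration (added; not from the original file):

from math import log10

# 2**11 = 2048 < 3**7 = 2187, and the log comparison agrees
assert 11 * log10(2) < 7 * log10(3)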
style_context_codestyle: 49
label: 1
code:

import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
49
def _lowerCamelCase ( a_ : list): if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''') for cell_n in range(1 , len(grid[0])): grid[0][cell_n] += grid[0][cell_n - 1] lowerCamelCase :Any = grid[0] for row_n in range(1 , len(a_)): lowerCamelCase :List[str] = grid[row_n] lowerCamelCase :Union[str, Any] = fill_row(a_ , a_) lowerCamelCase :List[Any] = grid[row_n] return grid[-1][-1] def _lowerCamelCase ( a_ : list , a_ : list): current_row[0] += row_above[0] for cell_n in range(1 , len(a_)): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n]) return current_row if __name__ == "__main__": import doctest doctest.testmod()
49
1
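A minimal self-contained sketch of the same row-by-row minimum-path-sum DP as the grid sample above; the test grid and the expected total of 7 are illustrative.

# Sketch of the row-by-row DP above; grid and expected value are illustrative.
def min_path_sum(grid: list[list[int]]) -> int:
    row = list(grid[0])
    for j in range(1, len(row)):           # prefix sums along the first row
        row[j] += row[j - 1]
    for r in grid[1:]:
        row[0] += r[0]                     # only one way into the first column
        for j in range(1, len(r)):
            row[j] = r[j] + min(row[j - 1], row[j])
    return row[-1]

assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7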
def _lowerCamelCase ( a_ : float , a_ : float , a_ : int): if principal <= 0: raise Exception('''Principal borrowed must be > 0''') if rate_per_annum < 0: raise Exception('''Rate of interest must be >= 0''') if years_to_repay <= 0 or not isinstance(a_ , a_): raise Exception('''Years to repay must be an integer > 0''') # Yearly rate is divided by 12 to get monthly rate lowerCamelCase :Tuple = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly lowerCamelCase :Dict = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
49
import math def _lowerCamelCase ( a_ : int): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(a_) + 1) , 6): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowerCamelCase ( a_ : float = 0.1): lowerCamelCase :Dict = 3 lowerCamelCase :List[Any] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1): primes += is_prime(a_) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
49
1
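A quick numeric spot-check of the EMI formula in the loan sample above; the principal, rate, and term are made-up figures.

# Spot-check of the monthly-EMI formula: principal 25000, 8% p.a., 5 years.
principal, annual_rate, years = 25_000.0, 0.08, 5
r = annual_rate / 12                 # monthly rate
n = years * 12                       # number of monthly payments
emi = principal * r * (1 + r) ** n / ((1 + r) ** n - 1)
print(f"{emi:.2f}")                  # roughly 506.91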
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : def __init__( self : Optional[int] , __snake_case : Tuple , __snake_case : Optional[Any]=3 , __snake_case : int=32 , __snake_case : List[Any]=3 , __snake_case : int=10 , __snake_case : Any=[10, 20, 30, 40] , __snake_case : Dict=[1, 1, 2, 1] , __snake_case : Union[str, Any]=True , __snake_case : Dict=True , __snake_case : Union[str, Any]="relu" , __snake_case : Any=3 , __snake_case : Dict=None , ): lowerCamelCase :Tuple = parent lowerCamelCase :int = batch_size lowerCamelCase :Union[str, Any] = image_size lowerCamelCase :Union[str, Any] = num_channels lowerCamelCase :List[str] = embeddings_size lowerCamelCase :int = hidden_sizes lowerCamelCase :Optional[int] = depths lowerCamelCase :List[Any] = is_training lowerCamelCase :Dict = use_labels lowerCamelCase :Optional[int] = hidden_act lowerCamelCase :Optional[int] = num_labels lowerCamelCase :Dict = scope lowerCamelCase :str = len(__snake_case ) def snake_case ( self : Dict ): lowerCamelCase :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase :Optional[Any] = None if self.use_labels: lowerCamelCase :Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase :Tuple = self.get_config() return config, pixel_values, labels def snake_case ( self : int ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def snake_case ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Dict ): lowerCamelCase :int = TFResNetModel(config=__snake_case ) lowerCamelCase :str = model(__snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def snake_case ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] ): lowerCamelCase :Any = self.num_labels lowerCamelCase :Dict = TFResNetForImageClassification(__snake_case ) lowerCamelCase :List[str] = model(__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self : List[Any] ): lowerCamelCase :Optional[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase :Optional[Any] = config_and_inputs lowerCamelCase :Tuple = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = 
(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase = ( {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def snake_case ( self : List[str] ): lowerCamelCase :Union[str, Any] = TFResNetModelTester(self ) lowerCamelCase :str = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case ) def snake_case ( self : List[str] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( self : Union[str, Any] ): return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def snake_case ( self : Optional[int] ): pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def snake_case ( self : List[str] ): pass def snake_case ( self : Optional[int] ): lowerCamelCase , lowerCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :Any = model_class(__snake_case ) lowerCamelCase :Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase :Optional[Any] = [*signature.parameters.keys()] lowerCamelCase :Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __snake_case ) def snake_case ( self : List[str] ): lowerCamelCase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def snake_case ( self : List[str] ): def check_hidden_states_output(__snake_case : List[str] , __snake_case : Any , __snake_case : List[str] ): lowerCamelCase :Optional[Any] = model_class(__snake_case ) lowerCamelCase :List[str] = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase :List[Any] = self.model_tester.num_stages self.assertEqual(len(__snake_case ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase :str = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase :Any = layer_type lowerCamelCase :Dict = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase :int = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def snake_case ( self : int ): lowerCamelCase :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case ) @slow def snake_case ( self : List[str] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: 
lowerCamelCase :Tuple = TFResNetModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def _lowerCamelCase ( ): lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') return image @require_tf @require_vision class _lowerCAmelCase ( unittest.TestCase ): @cached_property def snake_case ( self : List[str] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def snake_case ( self : int ): lowerCamelCase :Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase :Optional[Any] = self.default_image_processor lowerCamelCase :Optional[Any] = prepare_img() lowerCamelCase :Optional[int] = image_processor(images=__snake_case , return_tensors='''tf''' ) # forward pass lowerCamelCase :Optional[Any] = model(**__snake_case ) # verify the logits lowerCamelCase :str = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __snake_case ) lowerCamelCase :List[str] = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __snake_case , atol=1e-4 ) )
49
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : str ): lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :Optional[Any] = -1 lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :str = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase :str = TextStreamer(__snake_case ) model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase :Optional[int] = cs.out[:-1] self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : Dict ): lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :List[Any] = -1 lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] ) lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case ) lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case ) thread.start() lowerCamelCase :Any = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : str ): lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :List[str] = -1 lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :] lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case ) model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase :int = cs.out[:-1] self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : Optional[int] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case ) lowerCamelCase :Optional[int] = -1 lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case ) model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n" lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def snake_case ( self : List[Any] ): lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :Optional[int] = -1 lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 ) lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__snake_case ): lowerCamelCase :Dict = '''''' for new_text in streamer: streamer_text += new_text
49
1
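A hedged usage sketch of TextIteratorStreamer mirroring the streamer tests above; the model name, prompt, and generation length are illustrative choices, not part of the test suite.

# Streaming-generation sketch: generate() runs in a worker thread while the
# main thread consumes decoded text chunks as they become available.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
for piece in streamer:
    print(piece, end="", flush=True)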
from pathlib import Path import numpy as np from PIL import Image def _lowerCamelCase ( a_ : np.ndarray): lowerCamelCase , lowerCamelCase , lowerCamelCase :Any = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b def _lowerCamelCase ( a_ : np.ndarray): return (gray > 1_27) & (gray <= 2_55) def _lowerCamelCase ( a_ : np.ndarray , a_ : np.ndarray): lowerCamelCase :List[Any] = np.zeros_like(a_) lowerCamelCase :Tuple = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)) # Copy image to padded image lowerCamelCase :int = image # Iterate over image & apply kernel for x in range(image.shape[1]): for y in range(image.shape[0]): lowerCamelCase :Optional[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() lowerCamelCase :str = int(summation > 0) return output if __name__ == "__main__": # read original image A__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" A__ = np.array(Image.open(lena_path)) # kernel to be applied A__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) A__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image A__ = Image.fromarray(output).convert("""RGB""") pil_img.save("""result_dilation.png""")
49
from maths.prime_factors import prime_factors def _lowerCamelCase ( a_ : int): if not isinstance(a_ , a_): lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer" raise TypeError(a_) if number < 1: raise ValueError('''Input must be a positive integer''') return -1 if len(prime_factors(a_)) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
49
1
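A small sanity check for the parity-sign helper above (effectively the Liouville function); it assumes the same maths.prime_factors module the sample imports, which returns prime factors with multiplicity.

# -1 for an odd number of prime factors (with multiplicity), +1 otherwise.
from maths.prime_factors import prime_factors  # import path assumed from the sample

def liouville(n: int) -> int:
    return -1 if len(prime_factors(n)) % 2 else 1

assert liouville(12) == -1  # 12 = 2 * 2 * 3 -> three factors
assert liouville(10) == 1   # 10 = 2 * 5     -> two factors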
from ...configuration_utils import PretrainedConfig from ...utils import logging A__ = logging.get_logger(__name__) A__ = { """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'donut-swin' _UpperCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Optional[int] , __snake_case : List[str]=224 , __snake_case : Optional[Any]=4 , __snake_case : List[Any]=3 , __snake_case : Optional[Any]=96 , __snake_case : Dict=[2, 2, 6, 2] , __snake_case : Optional[Any]=[3, 6, 12, 24] , __snake_case : str=7 , __snake_case : Dict=4.0 , __snake_case : List[str]=True , __snake_case : Optional[Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : List[str]=0.1 , __snake_case : List[str]="gelu" , __snake_case : List[str]=False , __snake_case : int=0.0_2 , __snake_case : List[Any]=1e-5 , **__snake_case : Dict , ): super().__init__(**__snake_case ) lowerCamelCase :Optional[Any] = image_size lowerCamelCase :Union[str, Any] = patch_size lowerCamelCase :Optional[Any] = num_channels lowerCamelCase :Dict = embed_dim lowerCamelCase :Optional[int] = depths lowerCamelCase :List[Any] = len(__snake_case ) lowerCamelCase :Any = num_heads lowerCamelCase :int = window_size lowerCamelCase :Dict = mlp_ratio lowerCamelCase :str = qkv_bias lowerCamelCase :int = hidden_dropout_prob lowerCamelCase :List[Any] = attention_probs_dropout_prob lowerCamelCase :List[Any] = drop_path_rate lowerCamelCase :Union[str, Any] = hidden_act lowerCamelCase :str = use_absolute_embeddings lowerCamelCase :Dict = layer_norm_eps lowerCamelCase :Optional[int] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCamelCase :Optional[int] = int(embed_dim * 2 ** (len(__snake_case ) - 1) )
49
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() A__ = logging.get_logger(__name__) def _lowerCamelCase ( a_ : str , a_ : str=False): lowerCamelCase :Optional[int] = [] # fmt: off # stem: rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''')) rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''')) rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''')) rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''')) # backbone rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''')) rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''')) rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''')) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", 
F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias")) # transformer encoder for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias")) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ]) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ]) # fmt: on return rename_keys def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False): for i in range(config.num_hidden_layers): if base_model: lowerCamelCase :Union[str, Any] = '''''' else: lowerCamelCase :Optional[int] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight") lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict lowerCamelCase :Any = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size] lowerCamelCase :int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase :Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase :Optional[Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :] def _lowerCamelCase ( a_ : int): lowerCamelCase :Any = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a_ , a_) def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple): lowerCamelCase 
:Optional[Any] = dct.pop(a_) lowerCamelCase :str = val def _lowerCamelCase ( ): lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw) return im @torch.no_grad() def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False): lowerCamelCase :Optional[int] = BitConfig( global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , ) lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00) lowerCamelCase :List[Any] = False # load original model from timm lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCamelCase :List[str] = timm_model.state_dict() if base_model: remove_classification_head_(a_) lowerCamelCase :Tuple = create_rename_keys(a_ , a_) for src, dest in rename_keys: rename_key(a_ , a_ , a_) read_in_q_k_v(a_ , a_ , a_) lowerCamelCase :List[str] = '''huggingface/label-files''' lowerCamelCase :Any = '''imagenet-1k-id2label.json''' lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r''')) lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()} lowerCamelCase :Optional[int] = idalabel lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval() else: lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval() model.load_state_dict(a_) # create image processor lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_)) lowerCamelCase :str = transform.transforms lowerCamelCase :int = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } lowerCamelCase :Any = ViTHybridImageProcessor( do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowerCamelCase :Dict = prepare_img() lowerCamelCase :str = transform(a_).unsqueeze(0) lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values # verify pixel values assert torch.allclose(a_ , a_) # verify logits with torch.no_grad(): lowerCamelCase :Optional[int] = model(a_) lowerCamelCase :Union[str, Any] = outputs.logits print('''Predicted class:''' , logits.argmax(-1).item()) if base_model: lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3) else: lowerCamelCase :List[str] = timm_model(a_) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(a_ , outputs.logits , atol=1e-3) print('''Looks ok!''') if pytorch_dump_folder_path is not None: Path(a_).mkdir(exist_ok=a_) print(F"Saving model {vit_name} to {pytorch_dump_folder_path}") model.save_pretrained(a_) print(F"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(a_) if push_to_hub: print(F"Pushing model and processor to the hub {vit_name}") 
model.push_to_hub(F"ybelkada/{vit_name}") processor.push_to_hub(F"ybelkada/{vit_name}") if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) A__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
49
1
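A worked check of the rule at the end of the DonutSwin config above: the channel dimension after the last stage is embed_dim * 2 ** (num_stages - 1), computed here with the sample defaults.

# With embed_dim=96 and four stages, the final hidden size is 96 * 2**3 = 768.
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768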
from collections import defaultdict from math import gcd def _lowerCamelCase ( a_ : int = 1_50_00_00): lowerCamelCase :defaultdict = defaultdict(a_) lowerCamelCase :Optional[Any] = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , a_ , 2): if gcd(a_ , a_) > 1: continue lowerCamelCase :Dict = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(a_ , limit + 1 , a_): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1) if __name__ == "__main__": print(F'{solution() = }')
49
def _lowerCamelCase ( a_ : int = 4_00_00_00): lowerCamelCase :Dict = [0, 1] lowerCamelCase :Optional[Any] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1]) if fib[i + 2] > n: break i += 1 lowerCamelCase :Dict = 0 for j in range(len(a_) - 1): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(F'{solution() = }')
49
1
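An equivalent rolling-pair version of the even-Fibonacci sum above (Project Euler 2); the asserted total is the well-known answer for the 4,000,000 limit.

# Keep only the last two Fibonacci numbers instead of the whole list.
def even_fib_sum(limit: int = 4_000_000) -> int:
    a, b, total = 1, 2, 0
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert even_fib_sum() == 4_613_732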
import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor A__ = logging.get_logger(__name__) class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : Dict , *__snake_case : List[Any] , **__snake_case : Any ): warnings.warn( '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PoolFormerImageProcessor instead.''' , __snake_case , ) super().__init__(*__snake_case , **__snake_case )
49
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ = { """configuration_nllb_moe""": [ """NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NllbMoeConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""", """NllbMoeForConditionalGeneration""", """NllbMoeModel""", """NllbMoePreTrainedModel""", """NllbMoeTop2Router""", """NllbMoeSparseMLP""", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTop2Router, ) else: import sys A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
49
1
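A minimal sketch of the lazy-import idea behind _LazyModule above; the class and helper names here are illustrative, not the transformers implementation.

import importlib
import types

class LazySubmodules(types.ModuleType):
    """Resolve exported symbols on first attribute access instead of at import time."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(module, attr)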
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex A__ = logging.getLogger(__name__) class _lowerCAmelCase : def __init__( self : int ): lowerCamelCase :Dict = False def snake_case ( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : int , __snake_case : str ): if not self.initialized: lowerCamelCase :Optional[int] = RagRetriever( __snake_case , question_encoder_tokenizer=__snake_case , generator_tokenizer=__snake_case , index=__snake_case , init_retrieval=__snake_case , ) lowerCamelCase :Any = True def snake_case ( self : str ): self.retriever.index.init_index() def snake_case ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] ): lowerCamelCase , lowerCamelCase :int = self.retriever._main_retrieve(__snake_case , __snake_case ) return doc_ids, retrieved_doc_embeds class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : Any , __snake_case : str , __snake_case : int , __snake_case : Optional[int] , __snake_case : int , __snake_case : Union[str, Any]=None ): if index is not None and index.is_initialized() and len(__snake_case ) > 0: raise ValueError( '''When using Ray for distributed fine-tuning, ''' '''you\'ll need to provide the paths instead, ''' '''as the dataset and the index are loaded ''' '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' ) super().__init__( __snake_case , question_encoder_tokenizer=__snake_case , generator_tokenizer=__snake_case , index=__snake_case , init_retrieval=__snake_case , ) lowerCamelCase :Tuple = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(__snake_case , __snake_case , __snake_case , __snake_case ) for worker in self.retrieval_workers ] ) def snake_case ( self : Optional[Any] ): logger.info('''initializing retrieval''' ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def snake_case ( self : List[str] , __snake_case : List[Any] , __snake_case : Optional[int] ): if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. 
lowerCamelCase :str = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] lowerCamelCase , lowerCamelCase :Dict = ray.get(random_worker.retrieve.remote(__snake_case , __snake_case ) ) else: lowerCamelCase , lowerCamelCase :Union[str, Any] = self._main_retrieve(__snake_case , __snake_case ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__snake_case ) @classmethod def snake_case ( cls : int , __snake_case : Dict , __snake_case : List[Any]=None , **__snake_case : str ): return super(__snake_case , cls ).get_tokenizers(__snake_case , __snake_case , **__snake_case ) @classmethod def snake_case ( cls : Optional[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Union[str, Any]=None , **__snake_case : Tuple ): lowerCamelCase :Tuple = kwargs.pop('''config''' , __snake_case ) or RagConfig.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :Optional[int] = RagTokenizer.from_pretrained(__snake_case , config=__snake_case ) lowerCamelCase :Dict = rag_tokenizer.question_encoder lowerCamelCase :Dict = rag_tokenizer.generator if indexed_dataset is not None: lowerCamelCase :str = '''custom''' lowerCamelCase :Any = CustomHFIndex(config.retrieval_vector_size , __snake_case ) else: lowerCamelCase :List[str] = cls._build_index(__snake_case ) return cls( __snake_case , question_encoder_tokenizer=__snake_case , generator_tokenizer=__snake_case , retrieval_workers=__snake_case , index=__snake_case , )
49
import numpy class _lowerCAmelCase : def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ): lowerCamelCase :Dict = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. lowerCamelCase :Dict = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. lowerCamelCase :Dict = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. lowerCamelCase :Any = numpy.random.rand(3 , 1 ) # Real output values provided. lowerCamelCase :Union[str, Any] = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. lowerCamelCase :List[str] = numpy.zeros(output_array.shape ) def snake_case ( self : Optional[int] ): lowerCamelCase :Any = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. lowerCamelCase :Any = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. lowerCamelCase :Dict = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def snake_case ( self : Any ): lowerCamelCase :Union[str, Any] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) lowerCamelCase :Dict = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) lowerCamelCase :int = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ): for iteration in range(1 , iterations + 1 ): lowerCamelCase :Union[str, Any] = self.feedforward() self.back_propagation() if give_loss: 
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F"Iteration {iteration} Loss: {loss}" ) def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ): lowerCamelCase :int = input_arr lowerCamelCase :Union[str, Any] = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) lowerCamelCase :Optional[Any] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) lowerCamelCase :Optional[int] = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _lowerCamelCase ( a_ : numpy.ndarray): return 1 / (1 + numpy.exp(-value)) def _lowerCamelCase ( a_ : numpy.ndarray): return (value) * (1 - (value)) def _lowerCamelCase ( ): lowerCamelCase :Optional[Any] = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa) # Calling neural network class. lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork( input_array=a_ , output_array=a_) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=a_ , iterations=10 , give_loss=a_) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa)) if __name__ == "__main__": example()
49
1
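A numerical spot-check of the sigmoid helpers used by the two-hidden-layer network above, confirming that the derivative of sigmoid(x) equals sigmoid(x) * (1 - sigmoid(x)).

# Compare the analytic derivative against a central finite difference.
import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

x, eps = 0.3, 1e-6
analytic = sigmoid(x) * (1 - sigmoid(x))
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
assert abs(analytic - numeric) < 1e-8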
def _lowerCamelCase ( a_ : int = 1_00): lowerCamelCase :int = set() lowerCamelCase :Dict = 0 lowerCamelCase :Union[str, Any] = n + 1 # maximum limit for a in range(2 , a_): for b in range(2 , a_): lowerCamelCase :Tuple = a**b # calculates the current power collect_powers.add(a_) # adds the result to the set return len(a_) if __name__ == "__main__": print("""Number of terms """, solution(int(str(input()).strip())))
49
def _lowerCamelCase ( a_ : str , a_ : str): lowerCamelCase :List[str] = len(a_) lowerCamelCase :List[str] = len(a_) lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)] lowerCamelCase :Optional[Any] = True for i in range(a_): for j in range(m + 1): if dp[i][j]: if j < m and a[i].upper() == b[j]: lowerCamelCase :Any = True if a[i].islower(): lowerCamelCase :List[str] = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
49
1
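A one-expression equivalent of the distinct-powers count above (Project Euler 29); 9183 is the known answer for bases and exponents in 2..100.

# A set comprehension deduplicates repeated powers such as 2**4 == 4**2.
n = 100
assert len({a ** b for a in range(2, n + 1) for b in range(2, n + 1)}) == 9183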
from __future__ import annotations class _lowerCAmelCase : def __init__( self : Union[str, Any] , __snake_case : List[str]=None ): lowerCamelCase :List[str] = data lowerCamelCase :str = None def __repr__( self : int ): lowerCamelCase :Union[str, Any] = [] lowerCamelCase :Optional[Any] = self while temp: string_rep.append(F"{temp.data}" ) lowerCamelCase :str = temp.next return "->".join(__snake_case ) def _lowerCamelCase ( a_ : list): if not elements_list: raise Exception('''The Elements List is empty''') lowerCamelCase :Optional[int] = Node(elements_list[0]) for i in range(1 , len(a_)): lowerCamelCase :List[str] = Node(elements_list[i]) lowerCamelCase :str = current.next return head def _lowerCamelCase ( a_ : Node): if head_node is not None and isinstance(a_ , a_): print_reverse(head_node.next) print(head_node.data) def _lowerCamelCase ( ): from doctest import testmod testmod() lowerCamelCase :List[Any] = make_linked_list([14, 52, 14, 12, 43]) print('''Linked List:''') print(a_) print('''Elements in Reverse:''') print_reverse(a_) if __name__ == "__main__": main()
49
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ): lowerCamelCase :Optional[Any] = parent lowerCamelCase :List[Any] = batch_size lowerCamelCase :Any = image_size lowerCamelCase :Union[str, Any] = patch_size lowerCamelCase :Any = num_channels lowerCamelCase :List[Any] = is_training lowerCamelCase :Optional[Any] = use_labels lowerCamelCase :Any = hidden_size lowerCamelCase :List[Any] = num_hidden_layers lowerCamelCase :List[str] = num_attention_heads lowerCamelCase :Tuple = intermediate_size lowerCamelCase :List[str] = hidden_act lowerCamelCase :List[str] = hidden_dropout_prob lowerCamelCase :Any = attention_probs_dropout_prob lowerCamelCase :List[Any] = type_sequence_label_size lowerCamelCase :Optional[int] = initializer_range lowerCamelCase :List[Any] = num_labels lowerCamelCase :Any = scope lowerCamelCase :Union[str, Any] = n_targets lowerCamelCase :Optional[Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens def snake_case ( self : List[str] ): lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowerCamelCase :List[str] = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowerCamelCase :Optional[int] = [] for i in range(self.batch_size ): lowerCamelCase :List[str] = {} lowerCamelCase :Tuple = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=__snake_case ) lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case ) labels.append(__snake_case ) lowerCamelCase :str = self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] ): return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ): lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :Union[str, Any] = model(__snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ): lowerCamelCase :int = YolosForObjectDetection(__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :str = model(pixel_values=__snake_case ) lowerCamelCase :Any = model(__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def snake_case ( self : int ): lowerCamelCase :List[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () _UpperCAmelCase = ( {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ): lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowerCamelCase :Dict = [] for i in range(self.model_tester.batch_size ): lowerCamelCase :Optional[Any] = {} lowerCamelCase :List[Any] = torch.ones( size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long ) lowerCamelCase :str = torch.ones( self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float ) labels.append(__snake_case ) lowerCamelCase :Union[str, Any] = labels return inputs_dict def snake_case ( self : Tuple ): lowerCamelCase :Union[str, Any] = YolosModelTester(self ) lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 ) def snake_case ( self : Union[str, Any] ): self.config_tester.run_common_tests() def snake_case ( self : Optional[Any] ): # YOLOS does not use inputs_embeds pass def snake_case ( self : Tuple ): lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :Optional[int] = model_class(__snake_case ) 
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase :str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) ) def snake_case ( self : str ): lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :str = model_class(__snake_case ) lowerCamelCase :Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase :Tuple = [*signature.parameters.keys()] lowerCamelCase :Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __snake_case ) def snake_case ( self : int ): lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def snake_case ( self : str ): lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase :int = True # in YOLOS, the seq_len is different lowerCamelCase :str = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowerCamelCase :str = True lowerCamelCase :Tuple = False lowerCamelCase :Optional[int] = True lowerCamelCase :int = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :str = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase :Optional[Any] = True lowerCamelCase :str = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Tuple = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowerCamelCase :Optional[int] = len(__snake_case ) # Check attention is always last and order is fine lowerCamelCase :Union[str, Any] = True lowerCamelCase :List[Any] = True lowerCamelCase :Tuple = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Dict = 1 self.assertEqual(out_len + added_hidden_states , len(__snake_case ) ) lowerCamelCase :Dict = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def snake_case ( self : List[str] ): def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ): lowerCamelCase :Union[str, Any] = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Optional[Any] = outputs.hidden_states lowerCamelCase :Any = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__snake_case ) , __snake_case ) # YOLOS has a different seq_length lowerCamelCase :List[str] = self.model_tester.expected_seq_len self.assertListEqual( 
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :Union[str, Any] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase :Any = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*__snake_case ) @slow def snake_case ( self : Dict ): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def _lowerCamelCase ( ): lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): @cached_property def snake_case ( self : Tuple ): return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None @slow def snake_case ( self : Dict ): lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case ) lowerCamelCase :Optional[Any] = self.default_image_processor lowerCamelCase :str = prepare_img() lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case ) # forward pass with torch.no_grad(): lowerCamelCase :Optional[Any] = model(inputs.pixel_values ) # verify outputs lowerCamelCase :int = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , __snake_case ) lowerCamelCase :Any = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , ) lowerCamelCase :Any = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) ) # verify postprocessing lowerCamelCase :List[str] = image_processor.post_process_object_detection( __snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case ) lowerCamelCase :str = [75, 75, 17, 63, 17] lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case ) self.assertEqual(len(results['''scores'''] ) , 5 ) self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) ) self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case ) self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
49
1
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = ''
    _UpperCAmelCase = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _UpperCAmelCase = None  # compression type in fsspec. ex: "gzip"
    _UpperCAmelCase = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
        super().__init__(self , **__snake_case )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        lowerCamelCase :Optional[Any] = fsspec.open(
            __snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
                '''requote_redirect_url''': False,  # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''' , {} ),  # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
        lowerCamelCase :Dict = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        lowerCamelCase :List[str] = None

    @classmethod
    def snake_case ( cls : Any , __snake_case : Any ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(__snake_case ).lstrip('''/''' )

    def snake_case ( self : Any ):
        if self.dir_cache is None:
            lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            lowerCamelCase :Optional[Any] = {f['''name''']: f}

    def snake_case ( self : Union[str, Any] , __snake_case : str ):
        return self.file.open().read()

    def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
        lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
        if mode != "rb":
            raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
        return self.file.open()


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'bz2'
    _UpperCAmelCase = 'bz2'
    _UpperCAmelCase = '.bz2'


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'gzip'
    _UpperCAmelCase = 'gzip'
    _UpperCAmelCase = '.gz'


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'lz4'
    _UpperCAmelCase = 'lz4'
    _UpperCAmelCase = '.lz4'


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'xz'
    _UpperCAmelCase = 'xz'
    _UpperCAmelCase = '.xz'


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'zstd'
    _UpperCAmelCase = 'zstd'
    _UpperCAmelCase = '.zst'

    def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
        super().__init__(
            fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        lowerCamelCase :Tuple = self.file.__enter__

        class _lowerCAmelCase :
            def __init__( self : Dict , __snake_case : Tuple ):
                lowerCamelCase :Optional[int] = file_

            def __enter__( self : Optional[int] ):
                self._file.__enter__()
                return self

            def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
                self._file.__exit__(*__snake_case , **__snake_case )

            def __iter__( self : Optional[Any] ):
                return iter(self._file )

            def snake_case ( self : List[Any] ):
                return next(self._file )

            def __getattr__( self : Any , __snake_case : str ):
                return getattr(self._file , __snake_case )

        def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
            return WrappedFile(_enter(*__snake_case , **__snake_case ) )

        lowerCamelCase :Dict = fixed_enter
49
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
    def snake_case ( self : Tuple ):
        lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
        lowerCamelCase :Dict = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        lowerCamelCase :Any = test_metrics

    @require_cpu
    def snake_case ( self : Dict ):
        debug_launcher(self.test_metrics.main , num_processes=1 )

    @require_cpu
    def snake_case ( self : int ):
        debug_launcher(self.test_metrics.main )

    @require_single_gpu
    def snake_case ( self : Any ):
        self.test_metrics.main()

    @require_multi_gpu
    def snake_case ( self : Optional[int] ):
        print(F"Found {torch.cuda.device_count()} devices." )
        lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(__snake_case , env=os.environ.copy() )
49
1
def _lowerCamelCase ( a_ : int):
    lowerCamelCase :int = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def _lowerCamelCase ( a_ : int = 50_00):
    lowerCamelCase :Union[str, Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , a_)]
    for i, pentagonal_i in enumerate(a_):
        for j in range(a_ , len(a_)):
            lowerCamelCase :Dict = pentagonal_nums[j]
            lowerCamelCase :Any = pentagonal_i + pentagonal_j
            lowerCamelCase :str = pentagonal_j - pentagonal_i
            if is_pentagonal(a_) and is_pentagonal(a_):
                return b
    return -1


if __name__ == "__main__":
    print(F'{solution() = }')
49
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = ''
    _UpperCAmelCase = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _UpperCAmelCase = None  # compression type in fsspec. ex: "gzip"
    _UpperCAmelCase = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
        super().__init__(self , **__snake_case )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        lowerCamelCase :Optional[Any] = fsspec.open(
            __snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
                '''requote_redirect_url''': False,  # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''' , {} ),  # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
        lowerCamelCase :Dict = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        lowerCamelCase :List[str] = None

    @classmethod
    def snake_case ( cls : Any , __snake_case : Any ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(__snake_case ).lstrip('''/''' )

    def snake_case ( self : Any ):
        if self.dir_cache is None:
            lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            lowerCamelCase :Optional[Any] = {f['''name''']: f}

    def snake_case ( self : Union[str, Any] , __snake_case : str ):
        return self.file.open().read()

    def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
        lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
        if mode != "rb":
            raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
        return self.file.open()


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'bz2'
    _UpperCAmelCase = 'bz2'
    _UpperCAmelCase = '.bz2'


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'gzip'
    _UpperCAmelCase = 'gzip'
    _UpperCAmelCase = '.gz'


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'lz4'
    _UpperCAmelCase = 'lz4'
    _UpperCAmelCase = '.lz4'


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'xz'
    _UpperCAmelCase = 'xz'
    _UpperCAmelCase = '.xz'


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'zstd'
    _UpperCAmelCase = 'zstd'
    _UpperCAmelCase = '.zst'

    def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
        super().__init__(
            fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        lowerCamelCase :Tuple = self.file.__enter__

        class _lowerCAmelCase :
            def __init__( self : Dict , __snake_case : Tuple ):
                lowerCamelCase :Optional[int] = file_

            def __enter__( self : Optional[int] ):
                self._file.__enter__()
                return self

            def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
                self._file.__exit__(*__snake_case , **__snake_case )

            def __iter__( self : Optional[Any] ):
                return iter(self._file )

            def snake_case ( self : List[Any] ):
                return next(self._file )

            def __getattr__( self : Any , __snake_case : str ):
                return getattr(self._file , __snake_case )

        def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
            return WrappedFile(_enter(*__snake_case , **__snake_case ) )

        lowerCamelCase :Dict = fixed_enter
49
1
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , __SCREAMING_SNAKE_CASE , )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = RobertaConfig
    _UpperCAmelCase = 'roberta'

    def __init__( self : Dict , __snake_case : Optional[int] ):
        super().__init__(__snake_case )
        lowerCamelCase :Optional[int] = RobertaEmbeddings(__snake_case )
        self.init_weights()


@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , __SCREAMING_SNAKE_CASE , )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = RobertaConfig
    _UpperCAmelCase = 'roberta'

    def __init__( self : List[str] , __snake_case : Optional[Any] ):
        super().__init__(__snake_case )
        lowerCamelCase :Any = config.num_labels
        lowerCamelCase :Union[str, Any] = config.num_hidden_layers
        lowerCamelCase :Optional[int] = DeeRobertaModel(__snake_case )
        lowerCamelCase :Dict = nn.Dropout(config.hidden_dropout_prob )
        lowerCamelCase :Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )

    @add_start_docstrings_to_model_forward(__snake_case )
    def snake_case ( self : int , __snake_case : Optional[int]=None , __snake_case : int=None , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : Any=None , __snake_case : int=None , __snake_case : Union[str, Any]=None , __snake_case : Dict=-1 , __snake_case : Optional[Any]=False , ):
        lowerCamelCase :List[str] = self.num_layers
        try:
            lowerCamelCase :Tuple = self.roberta(
                __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , position_ids=__snake_case , head_mask=__snake_case , inputs_embeds=__snake_case , )
            lowerCamelCase :int = outputs[1]
            lowerCamelCase :str = self.dropout(__snake_case )
            lowerCamelCase :List[Any] = self.classifier(__snake_case )
            lowerCamelCase :Optional[int] = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            lowerCamelCase :Union[str, Any] = e.message
            lowerCamelCase :List[Any] = e.exit_layer
            lowerCamelCase :Union[str, Any] = outputs[0]
        if not self.training:
            lowerCamelCase :List[str] = entropy(__snake_case )
            lowerCamelCase :Union[str, Any] = []
            lowerCamelCase :Dict = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                lowerCamelCase :List[str] = MSELoss()
                lowerCamelCase :Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                lowerCamelCase :List[str] = CrossEntropyLoss()
                lowerCamelCase :Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            lowerCamelCase :List[Any] = []
            for highway_exit in outputs[-1]:
                lowerCamelCase :Optional[int] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__snake_case )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    lowerCamelCase :Dict = MSELoss()
                    lowerCamelCase :int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    lowerCamelCase :int = CrossEntropyLoss()
                    lowerCamelCase :List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(__snake_case )
            if train_highway:
                lowerCamelCase :Dict = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                lowerCamelCase :Union[str, Any] = (loss,) + outputs
        if not self.training:
            lowerCamelCase :List[str] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                lowerCamelCase :Union[str, Any] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
49
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    _UpperCAmelCase = LEDTokenizer
    _UpperCAmelCase = LEDTokenizerFast
    _UpperCAmelCase = True

    def snake_case ( self : Any ):
        super().setUp()
        lowerCamelCase :Optional[int] = [
            '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''',
            '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''',
            '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''',
        ]
        lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
        lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
        lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__snake_case ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__snake_case ) )

    def snake_case ( self : int , **__snake_case : int ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )

    def snake_case ( self : Dict , **__snake_case : Any ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )

    def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
        return "lower newer", "lower newer"

    @cached_property
    def snake_case ( self : Any ):
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )

    @cached_property
    def snake_case ( self : int ):
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )

    @require_torch
    def snake_case ( self : str ):
        lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
            self.assertIsInstance(__snake_case , __snake_case )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(__snake_case , __snake_case )

    @require_torch
    def snake_case ( self : Tuple ):
        lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
            self.assertIn('''input_ids''' , __snake_case )
            self.assertIn('''attention_mask''' , __snake_case )
            self.assertNotIn('''labels''' , __snake_case )
            self.assertNotIn('''decoder_attention_mask''' , __snake_case )

    @require_torch
    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :Union[str, Any] = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    @require_torch
    def snake_case ( self : List[Any] ):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Optional[Any] = tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
            self.assertIsInstance(__snake_case , __snake_case )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )

    @require_torch
    def snake_case ( self : Optional[int] ):
        lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
        lowerCamelCase :Any = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
            lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
            lowerCamelCase :Optional[int] = inputs['''input_ids''']
            lowerCamelCase :Any = targets['''input_ids''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def snake_case ( self : Dict ):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
            lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
            lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
            lowerCamelCase :str = tokenizer.pad(__snake_case )
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )

    def snake_case ( self : Tuple ):
        pass

    def snake_case ( self : Optional[int] ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
                lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
                lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
                lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
                lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,
                    sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
49
1
from __future__ import annotations


def _lowerCamelCase ( a_ : list[int] , a_ : int):
    if len(a_) == 0:
        return False
    lowerCamelCase :Optional[int] = len(a_) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , a_)
    else:
        return binary_search(a_list[midpoint + 1 :] , a_)


if __name__ == "__main__":
    A__ = input("""Enter numbers separated by comma:\n""").strip()
    A__ = [int(item.strip()) for item in user_input.split(""",""")]
    A__ = int(input("""Enter the number to be found in the list:\n""").strip())
    A__ = """""" if binary_search(sequence, target) else """not """
    print(F'{target} was {not_str}found in {sequence}')
49
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


A__ = {
    """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
    """processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
    """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = ["""LayoutLMv2TokenizerFast"""]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = ["""LayoutLMv2FeatureExtractor"""]
    A__ = ["""LayoutLMv2ImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = [
        """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LayoutLMv2ForQuestionAnswering""",
        """LayoutLMv2ForSequenceClassification""",
        """LayoutLMv2ForTokenClassification""",
        """LayoutLMv2Layer""",
        """LayoutLMv2Model""",
        """LayoutLMv2PreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

else:
    import sys

    A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
49
1
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    def snake_case ( self : List[Any] ):
        lowerCamelCase :int = tempfile.mkdtemp()
        # fmt: off
        lowerCamelCase :Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        lowerCamelCase :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        lowerCamelCase :List[str] = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        lowerCamelCase :Dict = os.path.join(self.tmpdirname , __snake_case )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__snake_case , __snake_case )

    def snake_case ( self : int , **__snake_case : List[Any] ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **__snake_case )

    def snake_case ( self : Dict , **__snake_case : Tuple ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **__snake_case )

    def snake_case ( self : List[Any] ):
        shutil.rmtree(self.tmpdirname )

    def snake_case ( self : Optional[int] ):
        lowerCamelCase :Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowerCamelCase :Optional[int] = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case ( self : Optional[int] ):
        lowerCamelCase :Union[str, Any] = self.get_tokenizer()
        lowerCamelCase :Any = self.get_image_processor()
        lowerCamelCase :Any = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase :List[Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , __snake_case )

    def snake_case ( self : Dict ):
        lowerCamelCase :Any = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase :Optional[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        lowerCamelCase :Tuple = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
        lowerCamelCase :List[str] = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__snake_case , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __snake_case )

    def snake_case ( self : List[str] ):
        lowerCamelCase :Dict = self.get_image_processor()
        lowerCamelCase :int = self.get_tokenizer()
        lowerCamelCase :Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
        lowerCamelCase :Dict = self.prepare_image_inputs()
        lowerCamelCase :Union[str, Any] = image_processor(__snake_case , return_tensors='''np''' )
        lowerCamelCase :Union[str, Any] = processor(images=__snake_case , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def snake_case ( self : Optional[int] ):
        lowerCamelCase :Tuple = self.get_image_processor()
        lowerCamelCase :Tuple = self.get_tokenizer()
        lowerCamelCase :Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
        lowerCamelCase :List[str] = '''lower newer'''
        lowerCamelCase :str = processor(text=__snake_case )
        lowerCamelCase :Dict = tokenizer(__snake_case )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case ( self : str ):
        lowerCamelCase :Tuple = self.get_image_processor()
        lowerCamelCase :Tuple = self.get_tokenizer()
        lowerCamelCase :Dict = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
        lowerCamelCase :Optional[Any] = '''lower newer'''
        lowerCamelCase :Tuple = self.prepare_image_inputs()
        lowerCamelCase :List[str] = processor(text=__snake_case , images=__snake_case )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(__snake_case ):
            processor()

    def snake_case ( self : Optional[int] ):
        lowerCamelCase :int = self.get_image_processor()
        lowerCamelCase :List[str] = self.get_tokenizer()
        lowerCamelCase :Optional[int] = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
        lowerCamelCase :str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCamelCase :Dict = processor.batch_decode(__snake_case )
        lowerCamelCase :Optional[int] = tokenizer.batch_decode(__snake_case )
        self.assertListEqual(__snake_case , __snake_case )

    def snake_case ( self : Tuple ):
        lowerCamelCase :Tuple = self.get_image_processor()
        lowerCamelCase :List[Any] = self.get_tokenizer()
        lowerCamelCase :Any = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
        lowerCamelCase :Optional[Any] = '''lower newer'''
        lowerCamelCase :Union[str, Any] = self.prepare_image_inputs()
        lowerCamelCase :Tuple = processor(text=__snake_case , images=__snake_case )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
49
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class _lowerCAmelCase :
        @staticmethod
        def snake_case ( *__snake_case : str , **__snake_case : str ):
            pass


@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    @require_torch
    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :Optional[int] = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(__snake_case ) , [
                [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
                [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
            ] , )
        lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) , [
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
            ] , )

    @require_tf
    def snake_case ( self : Tuple ):
        lowerCamelCase :Tuple = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
        lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) , [
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
                [{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}],
            ] , )

    @slow
    @require_torch
    def snake_case ( self : Any ):
        lowerCamelCase :str = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__snake_case ) , [
                {'''score''': 0.5_1_1, '''label''': '''remote'''},
                {'''score''': 0.4_8_5, '''label''': '''cat'''},
                {'''score''': 0.0_0_4, '''label''': '''plane'''},
            ] , )
        lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) , [
                [
                    {'''score''': 0.5_1_1, '''label''': '''remote'''},
                    {'''score''': 0.4_8_5, '''label''': '''cat'''},
                    {'''score''': 0.0_0_4, '''label''': '''plane'''},
                ],
            ] * 5 , )

    @slow
    @require_tf
    def snake_case ( self : Optional[Any] ):
        lowerCamelCase :Union[str, Any] = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__snake_case ) , [
                {'''score''': 0.5_1_1, '''label''': '''remote'''},
                {'''score''': 0.4_8_5, '''label''': '''cat'''},
                {'''score''': 0.0_0_4, '''label''': '''plane'''},
            ] , )
        lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) , [
                [
                    {'''score''': 0.5_1_1, '''label''': '''remote'''},
                    {'''score''': 0.4_8_5, '''label''': '''cat'''},
                    {'''score''': 0.0_0_4, '''label''': '''plane'''},
                ],
            ] * 5 , )
49
1
from __future__ import annotations


def _lowerCamelCase ( a_ : int | float | str , a_ : int | float | str):
    if nth_term == "":
        return [""]
    lowerCamelCase :List[str] = int(a_)
    lowerCamelCase :List[Any] = int(a_)
    lowerCamelCase :list[str] = []
    for temp in range(int(a_)):
        series.append(F"1 / {pow(temp + 1 , int(a_))}" if series else '''1''')
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    A__ = int(input("""Enter the last number (nth term) of the P-Series"""))
    A__ = int(input("""Enter the power for P-Series"""))
    print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
    print(p_series(nth_term, power))
49
import operator as op


def _lowerCamelCase ( a_ : Tuple):
    lowerCamelCase :int = []
    lowerCamelCase :List[str] = lambda a_ , a_: int(x / y)  # noqa: E731 integer division operation
    lowerCamelCase :Optional[int] = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation
    # print table header
    print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
    print('''-''' * (30 + len(a_)))
    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(a_)  # append x to stack
            # output in tabular format
            print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
        else:
            lowerCamelCase :Optional[Any] = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
            lowerCamelCase :str = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
            stack.append(
                str(opr[x](int(a_) , int(a_))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
    return int(stack[0])


if __name__ == "__main__":
    A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
    print("""\n\tResult = """, solve(Postfix))
49
1
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs


A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)


def _lowerCamelCase ( ):
    lowerCamelCase :int = cn.convert_to_negative(a_)
    # assert negative_img array for at least one True
    assert negative_img.any()


def _lowerCamelCase ( ):
    with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(a_ , 1_10)).startswith(
            '''<PIL.Image.Image image mode=RGB size=100x100 at''')


def _lowerCamelCase ( ):
    lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def _lowerCamelCase ( ):
    lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    lowerCamelCase :Optional[Any] = canny.canny(a_)
    # assert canny array for at least one True
    assert canny_array.any()


def _lowerCamelCase ( ):
    assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()


def _lowerCamelCase ( ):
    # laplace diagonals
    lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
    assert res.any()


def _lowerCamelCase ( ):
    assert med.median_filter(a_ , 3).any()


def _lowerCamelCase ( ):
    lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
    assert grad.any() and theta.any()


def _lowerCamelCase ( ):
    lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
    assert sepia.all()


def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
    lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
    burkes.process()
    assert burkes.output_img.any()


def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
    lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
    nn.process()
    assert nn.output.any()


def _lowerCamelCase ( ):
    lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    lowerCamelCase :Tuple = imread(a_ , 0)
    # Test for get_neighbors_pixel function() return not None
    lowerCamelCase :Dict = 0
    lowerCamelCase :Optional[Any] = 0
    lowerCamelCase :str = image[x_coordinate][y_coordinate]
    lowerCamelCase :Any = lbp.get_neighbors_pixel(
        a_ , a_ , a_ , a_)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0]):
        for j in range(0 , image.shape[1]):
            lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
    assert lbp_image.any()
49
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
    raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
    raise Exception("""requires fairseq < v2""")

logging.set_verbosity_info()
A__ = logging.get_logger(__name__)

A__ = """Hello, World!"""
A__ = """en_XX"""


def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
    lowerCamelCase :int = Path('''data_bin''')
    lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
    xmod.eval()  # disable dropout
    print(a_)
    lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
    lowerCamelCase :List[str] = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , a_)
    lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
    lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
    lowerCamelCase :List[str] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c xmod doesn't use them.
    lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
    lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
        lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
        # self attention
        lowerCamelCase :Optional[int] = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''')
        lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
        lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
        lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
        lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
        lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
        lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        lowerCamelCase :Optional[int] = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''')
        lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
        lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
        lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
        lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        lowerCamelCase :Optional[int] = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''')
        lowerCamelCase :int = xmod_layer.fca.weight
        lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
        # output
        lowerCamelCase :List[str] = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''')
        lowerCamelCase :str = xmod_layer.fca.weight
        lowerCamelCase :int = xmod_layer.fca.bias
        lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
        lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
            lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError('''Lists of language adapters do not match.''')
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
            lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
            lowerCamelCase :List[Any] = from_adapter.fca.weight
            lowerCamelCase :List[Any] = from_adapter.fca.bias
            lowerCamelCase :Dict = from_adapter.fca.weight
            lowerCamelCase :Optional[Any] = from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
        lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
        lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
        lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
        lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
        lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
        lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
        lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
        lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
        lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    lowerCamelCase :str = xmod.encode(a_).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(a_)
    lowerCamelCase :Any = model(a_)[0]
    if classification_head:
        lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
    else:
        lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape , their_output.shape)
    lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
    print(F"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
    if not success:
        raise Exception('''Something went wRoNg''')
    Path(a_).mkdir(parents=a_ , exist_ok=a_)
    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(a_)


if __name__ == "__main__":
    A__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
    )
    A__ = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
49
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = KandinskyImgaImgPipeline _UpperCAmelCase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image'] _UpperCAmelCase = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', ] _UpperCAmelCase = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase = False @property def snake_case ( self : Optional[int] ): return 32 @property def snake_case ( self : Union[str, Any] ): return 32 @property def snake_case ( self : Union[str, Any] ): return self.time_input_dim @property def snake_case ( self : List[str] ): return self.time_input_dim * 4 @property def snake_case ( self : List[str] ): return 100 @property def snake_case ( self : List[str] ): lowerCamelCase :int = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def snake_case ( self : int ): torch.manual_seed(0 ) lowerCamelCase :Union[str, Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) lowerCamelCase :Tuple = MultilingualCLIP(__snake_case ) lowerCamelCase :Dict = text_encoder.eval() return text_encoder @property def snake_case ( self : Optional[Any] ): torch.manual_seed(0 ) lowerCamelCase :Tuple = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } lowerCamelCase :Dict = UNetaDConditionModel(**__snake_case ) return model @property def snake_case ( self : Dict ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def snake_case ( self : int ): torch.manual_seed(0 ) lowerCamelCase :Union[str, 
Any] = VQModel(**self.dummy_movq_kwargs ) return model def snake_case ( self : Optional[Any] ): lowerCamelCase :Union[str, Any] = self.dummy_text_encoder lowerCamelCase :Any = self.dummy_tokenizer lowerCamelCase :Optional[int] = self.dummy_unet lowerCamelCase :Union[str, Any] = self.dummy_movq lowerCamelCase :int = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } lowerCamelCase :List[str] = DDIMScheduler(**__snake_case ) lowerCamelCase :Tuple = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def snake_case ( self : Tuple , __snake_case : str , __snake_case : Tuple=0 ): lowerCamelCase :Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case ) lowerCamelCase :str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case ) # create init_image lowerCamelCase :List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case ) lowerCamelCase :Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase :Optional[int] = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((256, 256) ) if str(__snake_case ).startswith('''mps''' ): lowerCamelCase :Any = torch.manual_seed(__snake_case ) else: lowerCamelCase :Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) lowerCamelCase :Union[str, Any] = { '''prompt''': '''horse''', '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def snake_case ( self : Tuple ): lowerCamelCase :List[Any] = '''cpu''' lowerCamelCase :List[str] = self.get_dummy_components() lowerCamelCase :Any = self.pipeline_class(**__snake_case ) lowerCamelCase :Union[str, Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) lowerCamelCase :Dict = pipe(**self.get_dummy_inputs(__snake_case ) ) lowerCamelCase :List[str] = output.images lowerCamelCase :Any = pipe( **self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0] lowerCamelCase :Tuple = image[0, -3:, -3:, -1] lowerCamelCase :Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase :Dict = np.array( [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : Any ): lowerCamelCase :Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' 
'''/kandinsky/kandinsky_img2img_frog.npy''' ) lowerCamelCase :int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) lowerCamelCase :int = '''A red cartoon frog, 4k''' lowerCamelCase :Dict = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__snake_case ) lowerCamelCase :Optional[Any] = KandinskyImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa ) lowerCamelCase :Optional[Any] = pipeline.to(__snake_case ) pipeline.set_progress_bar_config(disable=__snake_case ) lowerCamelCase :int = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase , lowerCamelCase :int = pipe_prior( __snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() lowerCamelCase :int = pipeline( __snake_case , image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) lowerCamelCase :Dict = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__snake_case , __snake_case )
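# A simplified sketch (my own, not the pipeline's actual code) of how img2img
# pipelines typically derive the number of denoising steps from the `strength`
# argument exercised in the tests above: with strength=0.2 and 100 scheduler
# steps, only roughly the last 20 steps run on the noised init image.
def effective_img2img_steps(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return num_inference_steps - t_start  # steps actually applied to the image


assert effective_img2img_steps(100, 0.2) == 20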
49
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


A__ = logging.get_logger(__name__)

A__ = {
    """andreasmadsen/efficient_mlm_m0.40""": (
        """https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
    ),
}


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase = 'roberta-prelayernorm'

    def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
        super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
        lowerCamelCase :Optional[int] = vocab_size
        lowerCamelCase :Dict = hidden_size
        lowerCamelCase :Tuple = num_hidden_layers
        lowerCamelCase :Optional[int] = num_attention_heads
        lowerCamelCase :Any = hidden_act
        lowerCamelCase :List[Any] = intermediate_size
        lowerCamelCase :Union[str, Any] = hidden_dropout_prob
        lowerCamelCase :str = attention_probs_dropout_prob
        lowerCamelCase :Tuple = max_position_embeddings
        lowerCamelCase :int = type_vocab_size
        lowerCamelCase :Optional[Any] = initializer_range
        lowerCamelCase :Union[str, Any] = layer_norm_eps
        lowerCamelCase :Dict = position_embedding_type
        lowerCamelCase :List[Any] = use_cache
        lowerCamelCase :Optional[int] = classifier_dropout


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    @property
    def snake_case ( self : Any ):
        if self.task == "multiple-choice":
            lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
49
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


A__ = {
    """configuration_upernet""": ["""UperNetConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = [
        """UperNetForSemanticSegmentation""",
        """UperNetPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
49
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = DebertaTokenizer _UpperCAmelCase = True _UpperCAmelCase = DebertaTokenizerFast def snake_case ( self : List[str] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase :Dict = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''} lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__snake_case ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__snake_case ) ) def snake_case ( self : str , **__snake_case : Dict ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Optional[Any] , __snake_case : int ): lowerCamelCase :List[Any] = '''lower newer''' lowerCamelCase :List[str] = '''lower newer''' return input_text, output_text def snake_case ( self : str ): lowerCamelCase :Optional[int] = self.get_tokenizer() lowerCamelCase :Union[str, Any] = '''lower newer''' lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) lowerCamelCase :List[str] = tokens + [tokenizer.unk_token] lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case ) def snake_case ( self : Optional[int] ): lowerCamelCase :List[str] = self.get_tokenizer() lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' ) lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __snake_case ) @slow def snake_case ( self : str ): lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case ) lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case ) lowerCamelCase :Union[str, Any] = tokenizer.encode( '''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case ) lowerCamelCase :str = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case ) lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case ) lowerCamelCase :Dict = 
tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def snake_case ( self : str ): lowerCamelCase :List[str] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase :Tuple = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case ) lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']] # fmt: off lowerCamelCase :Any = { '''input_ids''': [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on lowerCamelCase :Optional[int] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __snake_case ) for expected, decoded in zip(__snake_case , __snake_case ): self.assertEqual(__snake_case , __snake_case )
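# A toy re-implementation of the greedy BPE merging that the fixture above
# exercises (my own simplification, independent of the real DebertaTokenizer):
# merge rules are applied best-rank-first until no adjacent pair matches.
def apply_bpe_merges(symbols: list, merges: list) -> list:
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while True:
        candidates = [
            (ranks[(a, b)], i)
            for i, (a, b) in enumerate(zip(symbols, symbols[1:]))
            if (a, b) in ranks
        ]
        if not candidates:
            return symbols
        _, i = min(candidates)
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2 :]


# "lower" only triggers the "e r" rule, matching the expected bpe_tokens above:
assert apply_bpe_merges(list("lower"), ["\u0120 l", "\u0120l o", "\u0120lo w", "e r"]) == ["l", "o", "w", "er"]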
49
1
import math


def _lowerCamelCase ( a_ : int):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(a_) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _lowerCamelCase ( a_ : float = 0.1):
    lowerCamelCase :Dict = 3
    lowerCamelCase :List[Any] = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
            primes += is_prime(a_)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
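# A self-contained restatement of the spiral-corner counting above (Project
# Euler 58), with descriptive names of my own choosing; note how
# range(j*j + j + 1, (j + 2) ** 2, j + 1) enumerates the next layer's corners
# while deliberately skipping the perfect square (j + 2) ** 2, which is never prime.
def spiral_side_length(ratio: float = 0.5) -> int:
    def is_prime(n: int) -> bool:
        if n < 2:
            return False
        i = 2
        while i * i <= n:
            if n % i == 0:
                return False
            i += 1
        return True

    primes, side = 3, 3  # the 3x3 spiral already has prime corners 3, 5, 7
    while primes / (2 * side - 1) >= ratio:
        for corner in range(side * side + side + 1, (side + 2) ** 2, side + 1):
            primes += is_prime(corner)
        side += 2
    return side


assert spiral_side_length(0.5) == 11  # diagonal prime ratio first dips below 50%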
49
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) A__ = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ): lowerCamelCase :Tuple = None lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) lowerCamelCase :Optional[int] = os.path.abspath('''examples''' ) for item in os.listdir(__snake_case ): if item not in EXCLUDE_EXAMPLES: lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ) and ".py" in item_path: with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ): lowerCamelCase :Union[str, Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case ) lowerCamelCase :int = '''\n'''.join(__snake_case ) if special_strings is not None: for string in special_strings: lowerCamelCase :int = diff.replace(__snake_case , '''''' ) self.assertEqual(__snake_case , '''''' ) def snake_case ( self : Dict ): self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) lowerCamelCase :Optional[int] = [ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = False @classmethod def snake_case ( cls : Optional[Any] ): super().setUpClass() lowerCamelCase :Any = tempfile.mkdtemp() lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case ( cls : Dict ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case ( self : int ): lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n 
--checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def snake_case ( self : List[Any] ): lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() lowerCamelCase :List[Any] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def snake_case ( self : List[str] ): lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split() lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) def snake_case ( self : str ): lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split() lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case ) if torch.cuda.is_available(): lowerCamelCase :Union[str, Any] = torch.cuda.device_count() else: lowerCamelCase :Dict = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) else: self.assertIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) @slow def snake_case ( self : Any ): lowerCamelCase :Tuple = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case ) lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1] lowerCamelCase :List[str] = ast.literal_eval(__snake_case ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def snake_case ( self : int ): lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Any ): with tempfile.TemporaryDirectory() as tmpdir: lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) ) def snake_case ( self : Tuple ): lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def snake_case ( self : Optional[Any] ): lowerCamelCase :int = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
49
1
A__ = """ # Transformers 설치 방법 ! pip install transformers datasets # 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요. # ! pip install git+https://github.com/huggingface/transformers.git """ A__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}] A__ = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
49
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""") A__ = cvtColor(img, COLOR_BGR2GRAY) def _lowerCamelCase ( ): lowerCamelCase :int = cn.convert_to_negative(a_) # assert negative_img array for at least one True assert negative_img.any() def _lowerCamelCase ( ): with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img: # Work around assertion for response assert str(cc.change_contrast(a_ , 1_10)).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''') def _lowerCamelCase ( ): lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4) # Assert ambiguous array assert resp.all() def _lowerCamelCase ( ): lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0) # assert ambiguous array for all == True assert canny_img.all() lowerCamelCase :Optional[Any] = canny.canny(a_) # assert canny array for at least one True assert canny_array.any() def _lowerCamelCase ( ): assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all() def _lowerCamelCase ( ): # laplace diagonals lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]]) lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_) assert res.any() def _lowerCamelCase ( ): assert med.median_filter(a_ , 3).any() def _lowerCamelCase ( ): lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_) assert grad.any() and theta.any() def _lowerCamelCase ( ): lowerCamelCase :Dict = sp.make_sepia(a_ , 20) assert sepia.all() def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"): lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20) burkes.process() assert burkes.output_img.any() def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ): lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00) nn.process() assert nn.output.any() def _lowerCamelCase ( ): lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. lowerCamelCase :Tuple = imread(a_ , 0) # Test for get_neighbors_pixel function() return not None lowerCamelCase :Dict = 0 lowerCamelCase :Optional[Any] = 0 lowerCamelCase :str = image[x_coordinate][y_coordinate] lowerCamelCase :Any = lbp.get_neighbors_pixel( a_ , a_ , a_ , a_) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1])) # Iterating through the image and calculating the local binary pattern value # for each pixel. 
for i in range(0 , image.shape[0]): for j in range(0 , image.shape[1]): lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_) assert lbp_image.any()
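# A minimal illustration of the local binary pattern value the loop above
# computes per pixel (a generic 8-neighbor formulation of my own; the bit
# ordering convention varies between implementations):
center = 5
neighbors = [6, 2, 7, 5, 1, 9, 3, 4]  # clockwise around the center pixel
lbp_value = sum(int(n >= center) << bit for bit, n in enumerate(neighbors))
# bits set for neighbors >= 5 -> positions 0, 2, 3, 5 -> 1 + 4 + 8 + 32 = 45
assert lbp_value == 45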
49
1
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _lowerCAmelCase ( unittest.TestCase ): _UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING _UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def snake_case ( self : List[str] ): lowerCamelCase :str = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCamelCase :str = text_generator('''This is a test''' , do_sample=__snake_case ) self.assertEqual( __snake_case , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCamelCase :Any = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __snake_case , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) lowerCamelCase :Optional[Any] = text_generator('''This is a test''' , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case ) self.assertEqual( __snake_case , [ {'''generated_token_ids''': ANY(__snake_case )}, {'''generated_token_ids''': ANY(__snake_case )}, ] , ) lowerCamelCase :Any = text_generator.model.config.eos_token_id lowerCamelCase :Union[str, Any] = '''<pad>''' lowerCamelCase :Dict = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , ) self.assertEqual( __snake_case , [ [ {'''generated_token_ids''': ANY(__snake_case )}, {'''generated_token_ids''': ANY(__snake_case )}, ], [ {'''generated_token_ids''': ANY(__snake_case )}, {'''generated_token_ids''': ANY(__snake_case )}, ], ] , ) @require_tf def snake_case ( self : Tuple ): lowerCamelCase :Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCamelCase :Optional[int] = text_generator('''This is a test''' , do_sample=__snake_case ) self.assertEqual( __snake_case , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCamelCase :List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__snake_case ) self.assertEqual( __snake_case , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def snake_case ( self : Tuple , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Optional[int] ): lowerCamelCase :Dict = TextGenerationPipeline(model=__snake_case , tokenizer=__snake_case ) return text_generator, ["This is a test", "Another 
test"] def snake_case ( self : Tuple ): lowerCamelCase :Dict = '''Hello I believe in''' lowerCamelCase :Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :str = text_generator(__snake_case ) self.assertEqual( __snake_case , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCamelCase :str = text_generator(__snake_case , stop_sequence=''' fe''' ) self.assertEqual(__snake_case , [{'''generated_text''': '''Hello I believe in fe'''}] ) def snake_case ( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Any ): lowerCamelCase :Any = text_generator.model lowerCamelCase :int = text_generator.tokenizer lowerCamelCase :Optional[Any] = text_generator('''This is a test''' ) self.assertEqual(__snake_case , [{'''generated_text''': ANY(__snake_case )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCamelCase :Tuple = text_generator('''This is a test''' , return_full_text=__snake_case ) self.assertEqual(__snake_case , [{'''generated_text''': ANY(__snake_case )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCamelCase :Dict = pipeline(task='''text-generation''' , model=__snake_case , tokenizer=__snake_case , return_full_text=__snake_case ) lowerCamelCase :List[Any] = text_generator('''This is a test''' ) self.assertEqual(__snake_case , [{'''generated_text''': ANY(__snake_case )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCamelCase :int = text_generator('''This is a test''' , return_full_text=__snake_case ) self.assertEqual(__snake_case , [{'''generated_text''': ANY(__snake_case )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCamelCase :Dict = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__snake_case ) self.assertEqual( __snake_case , [ [{'''generated_text''': ANY(__snake_case )}, {'''generated_text''': ANY(__snake_case )}], [{'''generated_text''': ANY(__snake_case )}, {'''generated_text''': ANY(__snake_case )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCamelCase :Optional[Any] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__snake_case ) self.assertEqual( __snake_case , [ [{'''generated_text''': ANY(__snake_case )}, {'''generated_text''': ANY(__snake_case )}], [{'''generated_text''': ANY(__snake_case )}, {'''generated_text''': ANY(__snake_case )}], ] , ) with self.assertRaises(__snake_case ): lowerCamelCase :Tuple = text_generator('''test''' , return_full_text=__snake_case , return_text=__snake_case ) with self.assertRaises(__snake_case ): lowerCamelCase :Tuple = text_generator('''test''' , return_full_text=__snake_case , return_tensors=__snake_case ) with self.assertRaises(__snake_case ): lowerCamelCase :Tuple = text_generator('''test''' , return_text=__snake_case , return_tensors=__snake_case ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCamelCase :Any = text_generator('''''' ) self.assertEqual(__snake_case , [{'''generated_text''': ANY(__snake_case )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCamelCase :str = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCamelCase :Dict = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowerCamelCase :Union[str, Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__snake_case ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def snake_case ( self : Union[str, Any] ): import torch # Classic `model_kwargs` lowerCamelCase :int = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase :Union[str, Any] = pipe('''This is a test''' ) self.assertEqual( __snake_case , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
lowerCamelCase :Optional[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCamelCase :Optional[Any] = pipe('''This is a test''' ) self.assertEqual( __snake_case , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCamelCase :int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCamelCase :Any = pipe('''This is a test''' ) self.assertEqual( __snake_case , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def snake_case ( self : Optional[int] ): import torch lowerCamelCase :int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def snake_case ( self : Dict ): import torch lowerCamelCase :Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__snake_case , top_p=0.5 ) def snake_case ( self : Dict ): lowerCamelCase :int = '''Hello world''' lowerCamelCase :List[str] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCamelCase :List[str] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCamelCase :Union[str, Any] = logging.get_logger('''transformers.generation.utils''' ) lowerCamelCase :List[Any] = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__snake_case ) as cl: lowerCamelCase :Optional[Any] = text_generator(__snake_case , max_length=10 , max_new_tokens=1 ) self.assertIn(__snake_case , cl.out ) # The user only sets one -> no warning with CaptureLogger(__snake_case ) as cl: lowerCamelCase :List[Any] = text_generator(__snake_case , max_new_tokens=1 ) self.assertNotIn(__snake_case , cl.out ) with CaptureLogger(__snake_case ) as cl: lowerCamelCase :List[Any] = text_generator(__snake_case , max_length=10 ) self.assertNotIn(__snake_case , cl.out )
49
import os
from math import logaa


def _lowerCamelCase ( a_ : str = "base_exp.txt"):
    lowerCamelCase :float = 0
    lowerCamelCase :Optional[int] = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
        lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
        if x * logaa(a_) > largest:
            lowerCamelCase :List[Any] = x * logaa(a_)
            lowerCamelCase :Any = i + 1
    return result


if __name__ == "__main__":
    print(solution())
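# Why the logarithm comparison above works: log10 is monotonic, so
# base_a ** exp_a > base_b ** exp_b exactly when
# exp_a * log10(base_a) > exp_b * log10(base_b), which sidesteps computing
# astronomically large integers. A small self-contained check of the equivalence:
from math import log10

assert (11 * log10(2) > 7 * log10(3)) == (2**11 > 3**7)  # 2048 vs 2187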
49
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


A__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = [
        """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """IBertForMaskedLM""",
        """IBertForMultipleChoice""",
        """IBertForQuestionAnswering""",
        """IBertForSequenceClassification""",
        """IBertForTokenClassification""",
        """IBertModel""",
        """IBertPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
49
def _lowerCamelCase ( a_ : list):
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')

    for cell_n in range(1 , len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    lowerCamelCase :Any = grid[0]

    for row_n in range(1 , len(a_)):
        lowerCamelCase :List[str] = grid[row_n]
        lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
        lowerCamelCase :List[Any] = grid[row_n]

    return grid[-1][-1]


def _lowerCamelCase ( a_ : list , a_ : list):
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(a_)):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
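# The same minimum-path-sum recurrence as above, restated self-containedly
# with explicit names (my own rewrite for clarity; it mutates the grid
# in place just like the original):
def min_path_sum(grid: list) -> int:
    for c in range(1, len(grid[0])):
        grid[0][c] += grid[0][c - 1]  # first row: only reachable from the left
    for r in range(1, len(grid)):
        grid[r][0] += grid[r - 1][0]  # first column: only reachable from above
        for c in range(1, len(grid[0])):
            grid[r][c] += min(grid[r][c - 1], grid[r - 1][c])
    return grid[-1][-1]


assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1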
49
1
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def snake_case ( self : Optional[int] ): lowerCamelCase :Union[str, Any] = 1 lowerCamelCase :Dict = 3 lowerCamelCase :Any = (32, 32) lowerCamelCase :Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__snake_case ) return image @property def snake_case ( self : Tuple ): torch.manual_seed(0 ) lowerCamelCase :Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def snake_case ( self : List[Any] ): torch.manual_seed(0 ) lowerCamelCase :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def snake_case ( self : str ): torch.manual_seed(0 ) lowerCamelCase :Dict = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , ) return RobertaSeriesModelWithTransformation(__snake_case ) @property def snake_case ( self : List[Any] ): def extract(*__snake_case : Dict , **__snake_case : Optional[Any] ): class _lowerCAmelCase : def __init__( self : List[str] ): lowerCamelCase :Optional[Any] = torch.ones([0] ) def snake_case ( self : Tuple , __snake_case : Any ): self.pixel_values.to(__snake_case ) return self return Out() return extract def snake_case ( self : str ): lowerCamelCase :Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase :Any = self.dummy_cond_unet lowerCamelCase :List[str] = PNDMScheduler(skip_prk_steps=__snake_case ) lowerCamelCase :Any = self.dummy_vae lowerCamelCase :Any = self.dummy_text_encoder lowerCamelCase :Union[str, Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCamelCase :Union[str, Any] = 77 lowerCamelCase :Optional[Any] = self.dummy_image.to(__snake_case ) lowerCamelCase :int = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk lowerCamelCase :List[str] = AltDiffusionImgaImgPipeline( unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , ) lowerCamelCase :str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case ) lowerCamelCase :List[Any] = alt_pipe.to(__snake_case ) 
alt_pipe.set_progress_bar_config(disable=__snake_case ) lowerCamelCase :str = '''A painting of a squirrel eating a burger''' lowerCamelCase :Tuple = torch.Generator(device=__snake_case ).manual_seed(0 ) lowerCamelCase :List[Any] = alt_pipe( [prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__snake_case , ) lowerCamelCase :List[Any] = output.images lowerCamelCase :Tuple = torch.Generator(device=__snake_case ).manual_seed(0 ) lowerCamelCase :Tuple = alt_pipe( [prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__snake_case , return_dict=__snake_case , )[0] lowerCamelCase :str = image[0, -3:, -3:, -1] lowerCamelCase :Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase :str = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def snake_case ( self : Optional[Any] ): lowerCamelCase :int = self.dummy_cond_unet lowerCamelCase :str = PNDMScheduler(skip_prk_steps=__snake_case ) lowerCamelCase :Union[str, Any] = self.dummy_vae lowerCamelCase :Optional[Any] = self.dummy_text_encoder lowerCamelCase :int = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCamelCase :Any = 77 lowerCamelCase :Tuple = self.dummy_image.to(__snake_case ) # put models in fp16 lowerCamelCase :Any = unet.half() lowerCamelCase :List[str] = vae.half() lowerCamelCase :Any = bert.half() # make sure here that pndm scheduler skips prk lowerCamelCase :int = AltDiffusionImgaImgPipeline( unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , ) lowerCamelCase :Dict = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case ) lowerCamelCase :List[Any] = alt_pipe.to(__snake_case ) alt_pipe.set_progress_bar_config(disable=__snake_case ) lowerCamelCase :List[Any] = '''A painting of a squirrel eating a burger''' lowerCamelCase :Optional[int] = torch.manual_seed(0 ) lowerCamelCase :Optional[int] = alt_pipe( [prompt] , generator=__snake_case , num_inference_steps=2 , output_type='''np''' , image=__snake_case , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def snake_case ( self : List[Any] ): lowerCamelCase :Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) # resize to resolution that is divisible by 8 but not 16 or 32 lowerCamelCase :Optional[int] = init_image.resize((760, 504) ) lowerCamelCase :Any = '''BAAI/AltDiffusion''' lowerCamelCase :Tuple = AltDiffusionImgaImgPipeline.from_pretrained( __snake_case , safety_checker=__snake_case , ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() lowerCamelCase :List[Any] = '''A fantasy landscape, trending on artstation''' lowerCamelCase :Union[str, Any] = torch.manual_seed(0 ) lowerCamelCase :Tuple = pipe( prompt=__snake_case , image=__snake_case , strength=0.7_5 , guidance_scale=7.5 , generator=__snake_case , output_type='''np''' , ) 
lowerCamelCase :Any = output.images[0] lowerCamelCase :Tuple = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) lowerCamelCase :List[str] = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Optional[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : int ): lowerCamelCase :List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCamelCase :List[str] = init_image.resize((768, 512) ) lowerCamelCase :Optional[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' ) lowerCamelCase :str = '''BAAI/AltDiffusion''' lowerCamelCase :List[Any] = AltDiffusionImgaImgPipeline.from_pretrained( __snake_case , safety_checker=__snake_case , ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() lowerCamelCase :Any = '''A fantasy landscape, trending on artstation''' lowerCamelCase :int = torch.manual_seed(0 ) lowerCamelCase :List[Any] = pipe( prompt=__snake_case , image=__snake_case , strength=0.7_5 , guidance_scale=7.5 , generator=__snake_case , output_type='''np''' , ) lowerCamelCase :Dict = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1e-2
49
import math


def _lowerCamelCase ( a_ : int):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(a_) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _lowerCamelCase ( a_ : float = 0.1):
    lowerCamelCase :Dict = 3
    lowerCamelCase :List[Any] = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
            primes += is_prime(a_)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class _lowerCAmelCase : def __init__( self : str , __snake_case : str , __snake_case : Dict=13 , __snake_case : Dict=7 , __snake_case : Any=True , __snake_case : Any=True , __snake_case : Optional[int]=True , __snake_case : Any=True , __snake_case : Dict=99 , __snake_case : int=64 , __snake_case : Dict=32 , __snake_case : int=5 , __snake_case : Union[str, Any]=4 , __snake_case : Optional[int]=37 , __snake_case : List[Any]="gelu" , __snake_case : Any=0.1 , __snake_case : Dict=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : List[str]=16 , __snake_case : str=2 , __snake_case : Optional[int]=0.0_2 , __snake_case : Optional[int]=3 , __snake_case : Optional[int]=4 , __snake_case : Dict=None , ): lowerCamelCase :Optional[Any] = parent lowerCamelCase :Optional[Any] = batch_size lowerCamelCase :str = seq_length lowerCamelCase :Union[str, Any] = is_training lowerCamelCase :Tuple = use_input_mask lowerCamelCase :int = use_token_type_ids lowerCamelCase :List[Any] = use_labels lowerCamelCase :List[Any] = vocab_size lowerCamelCase :Any = hidden_size lowerCamelCase :Tuple = embedding_size lowerCamelCase :List[Any] = num_hidden_layers lowerCamelCase :Optional[Any] = num_attention_heads lowerCamelCase :List[str] = intermediate_size lowerCamelCase :List[str] = hidden_act lowerCamelCase :Dict = hidden_dropout_prob lowerCamelCase :List[Any] = attention_probs_dropout_prob lowerCamelCase :Tuple = max_position_embeddings lowerCamelCase :Optional[int] = type_vocab_size lowerCamelCase :List[str] = type_sequence_label_size lowerCamelCase :Optional[int] = initializer_range lowerCamelCase :Any = num_labels lowerCamelCase :Any = num_choices lowerCamelCase :Optional[int] = scope def snake_case ( self : Any ): lowerCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase :Union[str, Any] = None if self.use_input_mask: lowerCamelCase :Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase :Dict = None if self.use_token_type_ids: lowerCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase :Dict = None lowerCamelCase :Tuple = None lowerCamelCase :Optional[Any] = None if self.use_labels: lowerCamelCase :str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase :int = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase :Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self : str ): return MegatronBertConfig( 
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , ) def snake_case ( self : int , __snake_case : List[str] , __snake_case : int , __snake_case : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ): lowerCamelCase :Dict = MegatronBertModel(config=__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case ) lowerCamelCase :str = model(__snake_case , token_type_ids=__snake_case ) lowerCamelCase :Optional[int] = model(__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self : int , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] ): lowerCamelCase :Dict = MegatronBertForMaskedLM(config=__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :List[Any] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self : Tuple , __snake_case : int , __snake_case : str , __snake_case : Any , __snake_case : List[str] , __snake_case : str , __snake_case : Any , __snake_case : List[Any] ): lowerCamelCase :Any = MegatronBertForCausalLM(config=__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self : str , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : str ): lowerCamelCase :Dict = MegatronBertForNextSentencePrediction(config=__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :int = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def snake_case ( self : List[str] , __snake_case : str , __snake_case : int , __snake_case : Tuple , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[int] ): lowerCamelCase :Dict = MegatronBertForPreTraining(config=__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :Optional[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) 
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def snake_case ( self : Optional[int] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Optional[int] ):
        lowerCamelCase :Dict = MegatronBertForQuestionAnswering(config=__snake_case )
        model.to(__snake_case )
        model.eval()
        lowerCamelCase :Tuple = model(
            __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def snake_case ( self : Dict , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Any , __snake_case : int , __snake_case : int , __snake_case : Tuple , __snake_case : Tuple ):
        lowerCamelCase :int = self.num_labels
        lowerCamelCase :List[str] = MegatronBertForSequenceClassification(__snake_case )
        model.to(__snake_case )
        model.eval()
        lowerCamelCase :List[str] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def snake_case ( self : List[str] , __snake_case : Optional[int] , __snake_case : str , __snake_case : int , __snake_case : Dict , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Dict ):
        lowerCamelCase :Optional[Any] = self.num_labels
        lowerCamelCase :int = MegatronBertForTokenClassification(config=__snake_case )
        model.to(__snake_case )
        model.eval()
        lowerCamelCase :Any = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def snake_case ( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Union[str, Any] ):
        lowerCamelCase :Optional[Any] = self.num_choices
        lowerCamelCase :Tuple = MegatronBertForMultipleChoice(config=__snake_case )
        model.to(__snake_case )
        model.eval()
        lowerCamelCase :List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase :List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase :List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase :Dict = model(
            __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def snake_case ( self : Dict ):
        lowerCamelCase :List[str] = self.prepare_config_and_inputs()
        (
            ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) ,
            ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) ,
        ) :Optional[Any] = config_and_inputs
        lowerCamelCase :List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    _UpperCAmelCase = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase = (
        {
            'feature-extraction': MegatronBertModel,
            'fill-mask': MegatronBertForMaskedLM,
            'question-answering': MegatronBertForQuestionAnswering,
            'text-classification': MegatronBertForSequenceClassification,
            'text-generation': MegatronBertForCausalLM,
            'token-classification': MegatronBertForTokenClassification,
            'zero-shot': MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = True
    # test_resize_embeddings = False
    _UpperCAmelCase = False

    def snake_case ( self : Any , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[Any]=False ):
        lowerCamelCase :Union[str, Any] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
        if return_labels:
            if model_class in get_values(__snake_case ):
                lowerCamelCase :int = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case )
                lowerCamelCase :str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
        return inputs_dict

    def snake_case ( self : Optional[Any] ):
        lowerCamelCase :List[Any] = MegatronBertModelTester(self )
        lowerCamelCase :Union[str, Any] = ConfigTester(self , config_class=__snake_case , hidden_size=37 )

    def snake_case ( self : Dict ):
        self.config_tester.run_common_tests()

    def snake_case ( self : Any ):
        lowerCamelCase :int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*__snake_case )

    def snake_case ( self : List[str] ):
        lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case )

    def snake_case ( self : Any ):
        lowerCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case )

    def snake_case ( self : int ):
        lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case )

    def snake_case ( self : List[Any] ):
        lowerCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case )

    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case )

    def snake_case ( self : int ):
        lowerCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case )

    def snake_case ( self : Optional[int] ):
        lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case )


def _lowerCamelCase ( a_ : Optional[Any]):
    return torch.tensor(
        a_ , dtype=torch.long , device=a_ , )


A__ = 1E-4


@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
    @slow
    @unittest.skip('''Model is not available.''' )
    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :Union[str, Any] = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            lowerCamelCase :Any = os.path.join(os.environ['''MYDIR'''] , __snake_case )
        lowerCamelCase :Union[str, Any] = MegatronBertModel.from_pretrained(__snake_case )
        model.to(__snake_case )
        model.half()
        lowerCamelCase :List[str] = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            lowerCamelCase :Any = model(__snake_case )[0]
        lowerCamelCase :Tuple = torch.Size((1, 9, 1024) )
        self.assertEqual(output.shape , __snake_case )
        lowerCamelCase :str = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3 ):
            for jj in range(3 ):
                lowerCamelCase :int = output[0, ii, jj]
                lowerCamelCase :str = expected[3 * ii + jj]
                lowerCamelCase :Union[str, Any] = '''ii={} jj={} a={} b={}'''.format(__snake_case , __snake_case , __snake_case , __snake_case )
                self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case ) , msg=__snake_case )
49
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    def snake_case ( self : str ):
        lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
        lowerCamelCase :Optional[Any] = -1
        lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
        lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
        lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )

        with CaptureStdout() as cs:
            lowerCamelCase :str = TextStreamer(__snake_case )
            model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase :Optional[int] = cs.out[:-1]

        self.assertEqual(__snake_case , __snake_case )

    def snake_case ( self : Dict ):
        lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
        lowerCamelCase :List[Any] = -1
        lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
        lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
        lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )

        lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
        lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
        thread.start()
        lowerCamelCase :Any = ''''''
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(__snake_case , __snake_case )

    def snake_case ( self : str ):
        lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
        lowerCamelCase :List[str] = -1
        lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
        lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
        lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
        lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )

        with CaptureStdout() as cs:
            lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
            model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase :int = cs.out[:-1]

        self.assertEqual(__snake_case , __snake_case )

    def snake_case ( self : Optional[int] ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
        lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
        lowerCamelCase :Optional[int] = -1
        lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
            model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowerCamelCase :Tuple = cs.out[:-1]  # Remove the final "\n"
        lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def snake_case ( self : List[Any] ):
        lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
        lowerCamelCase :Optional[int] = -1
        lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
        lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
        lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(__snake_case ):
            lowerCamelCase :Dict = ''''''
            for new_text in streamer:
                streamer_text += new_text
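

# A minimal usage sketch (not part of the test suite above): it shows the two streamer
# patterns these tests exercise -- printing decoded text to stdout with TextStreamer, and
# consuming text from the calling thread with TextIteratorStreamer. The checkpoint is the
# same tiny test model used above; any causal LM checkpoint would work.
if __name__ == "__main__":
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, TextStreamer

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")

    # Pattern 1: stream decoded text straight to stdout while generate() runs.
    model.generate(**inputs, max_new_tokens=10, streamer=TextStreamer(tokenizer))

    # Pattern 2: run generate() in a worker thread and iterate over the text here.
    streamer = TextIteratorStreamer(tokenizer)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    text = "".join(streamer)  # blocks until generation finishes
    thread.join()
    print(text)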
49
1
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpta,
    recopy_gpta,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPTaLMHeadModel


def _lowerCamelCase ( a_ : Optional[int]=32 , a_ : Any=10 , a_ : Dict=1_00 , a_ : int=10_26 , a_ : List[str]=True , a_ : Dict="data/tokenized_stories_train_wikitext103.jbl" , a_ : int="igf_context_pairs.jbl" , ):
    set_seed(3)
    # generate train_data and objective_set
    lowerCamelCase , lowerCamelCase :Any = generate_datasets(
        a_ , a_ , number=a_ , min_len=10_26 , trim=a_)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    lowerCamelCase :Tuple = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''')
    # load pretrained model
    lowerCamelCase :List[str] = load_gpta('''gpt2''').to(a_)
    print('''computing perplexity on objective set''')
    lowerCamelCase :Tuple = compute_perplexity(a_ , a_ , a_).item()
    print('''perplexity on objective set:''' , a_)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def _lowerCamelCase ( a_ : Optional[Any] , a_ : List[Any]=15 , a_ : List[Any]=1_28 , a_ : Optional[Any]=1_00 , a_ : Tuple="igf_model.pt" , ):
    set_seed(42)

    # Load pre-trained model
    lowerCamelCase :Tuple = GPTaLMHeadModel.from_pretrained('''gpt2''')

    # Initialize secondary learner to use embedding weights of model
    lowerCamelCase :List[Any] = SecondaryLearner(a_)

    # Train secondary learner
    lowerCamelCase :Tuple = train_secondary_learner(
        a_ , a_ , max_epochs=a_ , batch_size=a_ , eval_freq=1_00 , igf_model_path=a_ , )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def _lowerCamelCase ( a_ : Union[str, Any] , a_ : int , a_ : Optional[Any] , a_ : List[Any]=32 , a_ : Tuple=10_00 , a_ : List[Any]=16 , a_ : List[str]=1.0 , a_ : Tuple=recopy_gpta , a_ : Tuple=None , a_ : List[Any]=10 , a_ : str="gpt2_finetuned.pt" , ):
    lowerCamelCase :int = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''')
    lowerCamelCase :Any = RandomSampler(a_)
    lowerCamelCase :Any = DataLoader(a_ , sampler=a_)

    lowerCamelCase :Optional[Any] = max_steps // (len(a_)) + 1
    lowerCamelCase :Optional[Any] = 0
    lowerCamelCase :str = torch.zeros((1, context_len) , dtype=torch.long , device=a_)
    lowerCamelCase , lowerCamelCase , lowerCamelCase :List[str] = recopy_model(a_ , a_ , a_)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(a_)
        secondary_learner.eval()
    lowerCamelCase :Optional[int] = []
    lowerCamelCase :Any = 0
    lowerCamelCase :Optional[Any] = []
    lowerCamelCase :Union[str, Any] = []

    # Compute the performance of the transformer model at the beginning
    lowerCamelCase :Optional[Any] = compute_perplexity(a_ , a_ , a_)
    test_perps.append(a_)
    print('''Test perplexity, step''' , a_ , ''':''' , a_)
    for epoch in range(int(a_)):
        for step, example in enumerate(a_):
            torch.cuda.empty_cache()
            lowerCamelCase :List[str] = random.randint(0 , example.size(2) - context_len - 1)
            lowerCamelCase :Dict = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            lowerCamelCase :Optional[Any] = model(a_ , labels=a_)
            lowerCamelCase :Optional[Any] = True
            if secondary_learner is not None:
                lowerCamelCase :Optional[Any] = secondary_learner.forward(
                    torch.tensor(a_ , dtype=torch.long , device=a_).unsqueeze(0))[0].item()
                observed_qs.append(float(a_))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    lowerCamelCase :str = -1
                if predicted_q < threshold:
                    lowerCamelCase :Tuple = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lowerCamelCase :int = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                lowerCamelCase :Optional[int] = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    lowerCamelCase :int = compute_perplexity(a_ , a_ , a_)
                    test_perps.append(a_)

                    print('''Test perplexity, step''' , a_ , ''':''' , a_)
                # Break out of the loop after 60 batches
                if max_steps > 0 and global_step > 60:
                    break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict() , a_)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def _lowerCamelCase ( ):
    lowerCamelCase :Optional[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''')

    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=a_ , type=a_ , required=a_ , help='''The input data dir. Should contain data files for WikiText.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--data_file''' , type=a_ , default=a_ , help=(
            '''A jbl file containing tokenized data which can be split as objective dataset, '''
            '''train_dataset and test_dataset.'''
        ) , )
    parser.add_argument(
        '''--igf_data_file''' , type=a_ , default=a_ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
    parser.add_argument(
        '''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the final fine-tuned model is stored.''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default=a_ , type=a_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
    parser.add_argument('''--seed''' , type=a_ , default=a_ , help='''A seed for reproducible training.''')
    parser.add_argument(
        '''--context_len''' , default=32 , type=a_ , help=(
            '''The maximum total input sequence length after tokenization. Sequences longer '''
            '''than this will be truncated, sequences shorter will be padded.'''
        ) , )
    parser.add_argument(
        '''--size_objective_set''' , default=1_00 , type=a_ , help='''number of articles that are long enough to be used as our objective set''' , )
    parser.add_argument(
        '''--eval_freq''' , default=1_00 , type=a_ , help='''secondary model evaluation is triggered at eval_freq''')
    parser.add_argument('''--max_steps''' , default=10_00 , type=a_ , help='''To calculate training epochs''')
    parser.add_argument(
        '''--secondary_learner_batch_size''' , default=1_28 , type=a_ , help='''batch size of training data for secondary learner''' , )
    parser.add_argument(
        '''--batch_size''' , default=16 , type=a_ , help='''batch size of training data of language model(gpt2) ''')
    parser.add_argument(
        '''--eval_interval''' , default=10 , type=a_ , help=(
            '''decay the selectivity of our secondary learner filter from'''
            '''1 standard deviation above average to 1 below average after 10 batches'''
        ) , )
    parser.add_argument(
        '''--number''' , default=1_00 , type=a_ , help='''The number of examples split to be used as objective_set/test_data''')
    parser.add_argument(
        '''--min_len''' , default=10_26 , type=a_ , help='''The minimum length of the article to be used as objective set''')
    parser.add_argument(
        '''--secondary_learner_max_epochs''' , default=15 , type=a_ , help='''number of epochs to train secondary learner''')
    parser.add_argument('''--trim''' , default=a_ , type=a_ , help='''truncate the example if it exceeds context length''')
    parser.add_argument(
        '''--threshold''' , default=1.0 , type=a_ , help=(
            '''The threshold value used by secondary learner to filter the train_data and allow only'''
            ''' informative data as input to the model'''
        ) , )
    parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a_ , help='''finetuned_model_name''')
    parser.add_argument(
        '''--recopy_model''' , default=a_ , type=a_ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=a_ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )

    # Load train data for secondary learner
    lowerCamelCase :str = joblib.load('''data/IGF_values.jbl''')

    # Train secondary learner
    lowerCamelCase :Tuple = training_secondary_learner(
        a_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='''igf_model.pt''' , )

    # load pretrained gpt2 model
    lowerCamelCase :Union[str, Any] = GPTaLMHeadModel.from_pretrained('''gpt2''')
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    lowerCamelCase , lowerCamelCase :int = generate_datasets(
        context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_00 , min_len=10_26 , trim=a_)

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        a_ , a_ , a_ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=a_ , secondary_learner=a_ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )


if __name__ == "__main__":
    main()
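

# Illustrative sketch (assumed values, independent of the pipeline above): the batch filter
# in the fine-tuning loop keeps a context only while the secondary learner's predicted IG(X)
# stays at or above a threshold, and the threshold is relaxed after 10 global steps. This
# standalone toy shows that selection rule with plain floats instead of model outputs.
def _select_contexts(predicted_qs, threshold=1.0, relax_after=10, relaxed_threshold=-1):
    kept = []
    for step, predicted_q in enumerate(predicted_qs):
        if step == relax_after:  # mirrors the `global_step == 10` decay above
            threshold = relaxed_threshold
        if predicted_q >= threshold:
            kept.append(step)
    return kept


assert _select_contexts([0.5, 1.2, 0.9, 1.5], threshold=1.0) == [1, 3]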
49
from maths.prime_factors import prime_factors


def _lowerCamelCase ( a_ : int):
    if not isinstance(a_ , a_):
        lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
        raise TypeError(a_)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(a_)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
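

# The function above uses the dataset's placeholder identifiers and is not runnable as-is;
# here is a minimal self-contained sketch of the same sign rule it encodes (parity of the
# number of prime factors), with a tiny trial-division factorizer standing in for
# `maths.prime_factors` (assumed to return factors with multiplicity).
def _prime_factors(number: int) -> list:
    factors, candidate = [], 2
    while candidate * candidate <= number:
        while number % candidate == 0:
            factors.append(candidate)
            number //= candidate
        candidate += 1
    if number > 1:
        factors.append(number)
    return factors


def _sign_by_factor_parity(number: int) -> int:
    return -1 if len(_prime_factors(number)) % 2 else 1


assert _sign_by_factor_parity(6) == 1    # 2 * 3 -> two factors
assert _sign_by_factor_parity(8) == -1   # 2 * 2 * 2 -> three factors
assert _sign_by_factor_parity(30) == -1  # 2 * 3 * 5 -> three factors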
49
1
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class _lowerCAmelCase :
    def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
        lowerCamelCase :Optional[Any] = parent
        lowerCamelCase :List[Any] = batch_size
        lowerCamelCase :Any = image_size
        lowerCamelCase :Union[str, Any] = patch_size
        lowerCamelCase :Any = num_channels
        lowerCamelCase :List[Any] = is_training
        lowerCamelCase :Optional[Any] = use_labels
        lowerCamelCase :Any = hidden_size
        lowerCamelCase :List[Any] = num_hidden_layers
        lowerCamelCase :List[str] = num_attention_heads
        lowerCamelCase :Tuple = intermediate_size
        lowerCamelCase :List[str] = hidden_act
        lowerCamelCase :List[str] = hidden_dropout_prob
        lowerCamelCase :Any = attention_probs_dropout_prob
        lowerCamelCase :List[Any] = type_sequence_label_size
        lowerCamelCase :Optional[int] = initializer_range
        lowerCamelCase :List[Any] = num_labels
        lowerCamelCase :Any = scope
        lowerCamelCase :Union[str, Any] = n_targets
        lowerCamelCase :Optional[Any] = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens

    def snake_case ( self : List[str] ):
        lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        lowerCamelCase :List[str] = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            lowerCamelCase :Optional[int] = []
            for i in range(self.batch_size ):
                lowerCamelCase :List[str] = {}
                lowerCamelCase :Tuple = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
                lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
                labels.append(__snake_case )
        lowerCamelCase :str = self.get_config()
        return config, pixel_values, labels

    def snake_case ( self : Union[str, Any] ):
        return YolosConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )

    def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
        lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
        model.to(__snake_case )
        model.eval()
        lowerCamelCase :Union[str, Any] = model(__snake_case )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )

    def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
        lowerCamelCase :int = YolosForObjectDetection(__snake_case )
        model.to(__snake_case )
        model.eval()
        lowerCamelCase :str = model(pixel_values=__snake_case )
        lowerCamelCase :Any = model(__snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )

    def snake_case ( self : int ):
        lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
        lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    _UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    _UpperCAmelCase = (
        {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
        lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                lowerCamelCase :Dict = []
                for i in range(self.model_tester.batch_size ):
                    lowerCamelCase :Optional[Any] = {}
                    lowerCamelCase :List[Any] = torch.ones(
                        size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
                    lowerCamelCase :str = torch.ones(
                        self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
                    labels.append(__snake_case )
                lowerCamelCase :Union[str, Any] = labels
        return inputs_dict

    def snake_case ( self : Tuple ):
        lowerCamelCase :Union[str, Any] = YolosModelTester(self )
        lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )

    def snake_case ( self : Union[str, Any] ):
        self.config_tester.run_common_tests()

    def snake_case ( self : Optional[Any] ):
        # YOLOS does not use inputs_embeds
        pass

    def snake_case ( self : Tuple ):
        lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase :Optional[int] = model_class(__snake_case )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase :str = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )

    def snake_case ( self : str ):
        lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase :str = model_class(__snake_case )
            lowerCamelCase :Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase :Tuple = [*signature.parameters.keys()]
            lowerCamelCase :Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __snake_case )

    def snake_case ( self : int ):
        lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__snake_case )

    def snake_case ( self : str ):
        lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase :int = True
        # in YOLOS, the seq_len is different
        lowerCamelCase :str = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            lowerCamelCase :str = True
            lowerCamelCase :Tuple = False
            lowerCamelCase :Optional[int] = True
            lowerCamelCase :int = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            with torch.no_grad():
                lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
            lowerCamelCase :str = outputs.attentions
            self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCamelCase :Optional[Any] = True
            lowerCamelCase :str = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            with torch.no_grad():
                lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
            lowerCamelCase :Tuple = outputs.attentions
            self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )

            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            lowerCamelCase :Optional[int] = len(__snake_case )

            # Check attention is always last and order is fine
            lowerCamelCase :Union[str, Any] = True
            lowerCamelCase :List[Any] = True
            lowerCamelCase :Tuple = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            with torch.no_grad():
                lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )

            lowerCamelCase :Dict = 1
            self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )

            lowerCamelCase :Dict = outputs.attentions
            self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )

    def snake_case ( self : List[str] ):
        def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
            lowerCamelCase :Union[str, Any] = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            with torch.no_grad():
                lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
            lowerCamelCase :Optional[Any] = outputs.hidden_states
            lowerCamelCase :Any = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(__snake_case ) , __snake_case )

            # YOLOS has a different seq_length
            lowerCamelCase :List[str] = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase :Union[str, Any] = True
            check_hidden_states_output(__snake_case , __snake_case , __snake_case )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase :Any = True
            check_hidden_states_output(__snake_case , __snake_case , __snake_case )

    def snake_case ( self : Optional[Any] ):
        lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*__snake_case )

    @slow
    def snake_case ( self : Dict ):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )


def _lowerCamelCase ( ):
    lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    @cached_property
    def snake_case ( self : Tuple ):
        return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None

    @slow
    def snake_case ( self : Dict ):
        lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
        lowerCamelCase :Optional[Any] = self.default_image_processor
        lowerCamelCase :str = prepare_img()
        lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )

        # forward pass
        with torch.no_grad():
            lowerCamelCase :Optional[Any] = model(inputs.pixel_values )

        # verify outputs
        lowerCamelCase :int = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape , __snake_case )

        lowerCamelCase :Any = torch.tensor(
            [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
        lowerCamelCase :Any = torch.tensor(
            [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )

        # verify postprocessing
        lowerCamelCase :List[str] = image_processor.post_process_object_detection(
            __snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
        lowerCamelCase :str = [75, 75, 17, 63, 17]
        lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )

        self.assertEqual(len(results['''scores'''] ) , 5 )
        self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
        self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
        self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
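

# Standalone usage sketch (not part of the tests above): the integration test's essential
# inference path with real identifiers -- preprocess an image, run YolosForObjectDetection,
# and post-process to thresholded boxes. The image path is a placeholder.
if __name__ == "__main__":
    import torch
    from PIL import Image

    from transformers import AutoImageProcessor, YolosForObjectDetection

    processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
    model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # Keep detections above a confidence threshold, rescaled to the original image size.
    results = processor.post_process_object_detection(
        outputs, threshold=0.3, target_sizes=[image.size[::-1]]
    )[0]
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        print(f"{model.config.id2label[label.item()]}: {score:.3f} at {box.tolist()}")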
49
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
A__ = logging.get_logger(__name__)


def _lowerCamelCase ( a_ : str , a_ : str=False):
    lowerCamelCase :Optional[int] = []

    # fmt: off
    # stem:
    rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
    rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))

    rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
    rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))

    # backbone
    rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
    rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
    rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
                ('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
                ('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
            ])

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ])
    # fmt: on

    return rename_keys


def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            lowerCamelCase :Union[str, Any] = ''''''
        else:
            lowerCamelCase :Optional[int] = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
        lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        lowerCamelCase :Any = in_proj_weight[
            : config.hidden_size, :
        ]
        lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
        lowerCamelCase :int = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowerCamelCase :Tuple = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowerCamelCase :Optional[Any] = in_proj_weight[
            -config.hidden_size :, :
        ]
        lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]


def _lowerCamelCase ( a_ : int):
    lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(a_ , a_)


def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
    lowerCamelCase :Optional[Any] = dct.pop(a_)
    lowerCamelCase :str = val


def _lowerCamelCase ( ):
    lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
    return im


@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
    lowerCamelCase :Optional[int] = BitConfig(
        global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
    lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
    lowerCamelCase :List[Any] = False

    # load original model from timm
    lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    lowerCamelCase :List[str] = timm_model.state_dict()
    if base_model:
        remove_classification_head_(a_)
    lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
    for src, dest in rename_keys:
        rename_key(a_ , a_ , a_)
    read_in_q_k_v(a_ , a_ , a_)

    lowerCamelCase :List[str] = '''huggingface/label-files'''
    lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
    lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
    lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
    lowerCamelCase :Optional[int] = idalabel
    lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
    else:
        lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
    model.load_state_dict(a_)

    # create image processor
    lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
    lowerCamelCase :str = transform.transforms

    lowerCamelCase :int = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }

    lowerCamelCase :Any = ViTHybridImageProcessor(
        do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )

    lowerCamelCase :Dict = prepare_img()
    lowerCamelCase :str = transform(a_).unsqueeze(0)
    lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values

    # verify pixel values
    assert torch.allclose(a_ , a_)

    # verify logits
    with torch.no_grad():
        lowerCamelCase :Optional[int] = model(a_)
    lowerCamelCase :Union[str, Any] = outputs.logits

    print('''Predicted class:''' , logits.argmax(-1).item())
    if base_model:
        lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
    else:
        lowerCamelCase :List[str] = timm_model(a_)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(a_ , outputs.logits , atol=1e-3)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        Path(a_).mkdir(exist_ok=a_)
        print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(a_)
        print(F"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(a_)

    if push_to_hub:
        print(F"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(F"ybelkada/{vit_name}")
        processor.push_to_hub(F"ybelkada/{vit_name}")


if __name__ == "__main__":
    A__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--vit_name""",
        default="""vit_base_r50_s16_384""",
        type=str,
        help="""Name of the hybrid ViT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
    )

    A__ = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
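

# Example invocation sketch (output directory is a placeholder, and the script filename is
# assumed to match the transformers repo's convert_vit_hybrid_timm_to_pytorch.py); the flags
# are the ones defined in the argparse block above:
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384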
49
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


A__ = {
    """configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
    """tokenization_lxmert""": ["""LxmertTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = ["""LxmertTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = [
        """LxmertEncoder""",
        """LxmertForPreTraining""",
        """LxmertForQuestionAnswering""",
        """LxmertModel""",
        """LxmertPreTrainedModel""",
        """LxmertVisualFeatureEncoder""",
        """LxmertXLayer""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = [
        """TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFLxmertForPreTraining""",
        """TFLxmertMainLayer""",
        """TFLxmertModel""",
        """TFLxmertPreTrainedModel""",
        """TFLxmertVisualFeatureEncoder""",
    ]


if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
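

# Minimal self-contained sketch of the lazy-import pattern this __init__ relies on: names
# are declared up front, but the heavy submodule is only imported on first attribute access.
# `_LazyModule` in transformers implements a more complete version of this idea; the class
# below is an illustrative toy, not the library's implementation.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Resolve the attribute to its submodule, import it lazily, and delegate.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")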
49
def _lowerCamelCase ( a_ : int = 4_00_00_00):
    lowerCamelCase :Dict = [0, 1]
    lowerCamelCase :Optional[Any] = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    lowerCamelCase :Dict = 0
    for j in range(len(a_) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(F'{solution() = }')
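

# Worked check of the even-Fibonacci sum above (placeholder identifiers aside, the logic is:
# generate Fibonacci numbers up to n, then add the even ones). For n = 100 the Fibonacci
# numbers are 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 and the even ones sum to 2 + 8 + 34 = 44.
def _even_fib_sum(n: int) -> int:
    total, a, b = 0, 1, 2
    while a <= n:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total


assert _even_fib_sum(100) == 44
assert _even_fib_sum(4_000_000) == 4_613_732  # the classic Project Euler #2 answer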
49
1
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def _lowerCamelCase ( a_ : Union[str, Any]):
    lowerCamelCase :Dict = 3_84
    if "tiny" in model_name:
        lowerCamelCase :Optional[int] = [3, 3, 9, 3]
        lowerCamelCase :int = [96, 1_92, 3_84, 7_68]
    if "small" in model_name:
        lowerCamelCase :Any = [3, 3, 27, 3]
        lowerCamelCase :Any = [96, 1_92, 3_84, 7_68]
    if "base" in model_name:
        lowerCamelCase :List[str] = [3, 3, 27, 3]
        lowerCamelCase :Optional[Any] = [1_28, 2_56, 5_12, 10_24]
        lowerCamelCase :Any = 5_12
    if "large" in model_name:
        lowerCamelCase :int = [3, 3, 27, 3]
        lowerCamelCase :Any = [1_92, 3_84, 7_68, 15_36]
        lowerCamelCase :int = 7_68
    if "xlarge" in model_name:
        lowerCamelCase :int = [3, 3, 27, 3]
        lowerCamelCase :Union[str, Any] = [2_56, 5_12, 10_24, 20_48]
        lowerCamelCase :Any = 10_24

    # set label information
    lowerCamelCase :Optional[Any] = 1_50
    lowerCamelCase :str = '''huggingface/label-files'''
    lowerCamelCase :List[str] = '''ade20k-id2label.json'''
    lowerCamelCase :Any = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
    lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
    lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}

    lowerCamelCase :List[Any] = ConvNextConfig(
        depths=a_ , hidden_sizes=a_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''])
    lowerCamelCase :List[Any] = UperNetConfig(
        backbone_config=a_ , auxiliary_in_channels=a_ , num_labels=a_ , idalabel=a_ , labelaid=a_ , )

    return config


def _lowerCamelCase ( a_ : List[Any]):
    lowerCamelCase :str = []

    # fmt: off
    # stem
    rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight'''))
    rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias'''))
    rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight'''))
    rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias'''))

    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))

        rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
            ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
            ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
            ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
        ])
    # fmt: on

    return rename_keys


def _lowerCamelCase ( a_ : int , a_ : List[str] , a_ : int):
    lowerCamelCase :List[Any] = dct.pop(a_)
    lowerCamelCase :List[str] = val


def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any] , a_ : int):
    lowerCamelCase :Optional[int] = {
        '''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
        '''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
        '''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
        '''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
        '''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
    }
    lowerCamelCase :List[Any] = model_name_to_url[model_name]
    lowerCamelCase :Any = torch.hub.load_state_dict_from_url(a_ , map_location='''cpu''')['''state_dict''']

    lowerCamelCase :Tuple = get_upernet_config(a_)
    lowerCamelCase :Optional[int] = UperNetForSemanticSegmentation(a_)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        lowerCamelCase :List[Any] = state_dict.pop(a_)
        if "bn" in key:
            lowerCamelCase :List[Any] = key.replace('''bn''' , '''batch_norm''')
        lowerCamelCase :Optional[int] = val

    # rename keys
    lowerCamelCase :str = create_rename_keys(a_)
    for src, dest in rename_keys:
        rename_key(a_ , a_ , a_)

    model.load_state_dict(a_)

    # verify on image
    lowerCamelCase :Tuple = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw).convert('''RGB''')

    lowerCamelCase :List[str] = SegformerImageProcessor()
    lowerCamelCase :Any = processor(a_ , return_tensors='''pt''').pixel_values

    with torch.no_grad():
        lowerCamelCase :Any = model(a_)

    if model_name == "upernet-convnext-tiny":
        lowerCamelCase :Optional[Any] = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]])
    elif model_name == "upernet-convnext-small":
        lowerCamelCase :Any = torch.tensor(
            [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]])
    elif model_name == "upernet-convnext-base":
        lowerCamelCase :Any = torch.tensor(
            [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]])
    elif model_name == "upernet-convnext-large":
        lowerCamelCase :List[Any] = torch.tensor(
            [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]])
    elif model_name == "upernet-convnext-xlarge":
        lowerCamelCase :Dict = torch.tensor(
            [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]])
    print('''Logits:''' , outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , a_ , atol=1e-4)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(a_)
        print(F"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(a_)

    if push_to_hub:
        print(F"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(F"openmmlab/{model_name}")
        processor.push_to_hub(F"openmmlab/{model_name}")


if __name__ == "__main__":
    A__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""upernet-convnext-tiny""",
        type=str,
        choices=[F'upernet-convnext-{size}' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
        help="""Name of the ConvNext UperNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )

    A__ = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
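

# Example invocation sketch (output directory is a placeholder, and the script filename is
# assumed to match the transformers repo's convert_convnext_upernet_to_pytorch.py); it
# downloads the mmsegmentation checkpoint for the chosen size, converts it, and verifies
# the logits on an ADE20k sample image:
#
#   python convert_convnext_upernet_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny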
49
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


A__ = {
    """configuration_nllb_moe""": [
        """NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """NllbMoeConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = [
        """NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """NllbMoeForConditionalGeneration""",
        """NllbMoeModel""",
        """NllbMoePreTrainedModel""",
        """NllbMoeTop2Router""",
        """NllbMoeSparseMLP""",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTopaRouter,
        )

else:
    import sys

    A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
49
1
def _lowerCamelCase ( a_ : List[Any]):
    lowerCamelCase :Optional[int] = len(a_)
    for i in range(length - 1):
        lowerCamelCase :Dict = i
        for k in range(i + 1 , a_):
            if collection[k] < collection[least]:
                lowerCamelCase :int = k
        if least != i:
            lowerCamelCase , lowerCamelCase :List[str] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    A__ = input("""Enter numbers separated by a comma:\n""").strip()
    A__ = [int(item) for item in user_input.split(""",""")]
    print(selection_sort(unsorted))
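

# Self-contained sketch of the same selection sort (the version above uses the dataset's
# placeholder names and is not runnable as-is): repeatedly swap the minimum of the unsorted
# suffix into position i.
def _selection_sort(values: list) -> list:
    for i in range(len(values) - 1):
        least = i
        for k in range(i + 1, len(values)):
            if values[k] < values[least]:
                least = k
        if least != i:
            values[i], values[least] = values[least], values[i]
    return values


assert _selection_sort([3, 1, 2]) == [1, 2, 3]
assert _selection_sort([]) == []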
49
import numpy class _lowerCAmelCase : def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ): lowerCamelCase :Dict = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. lowerCamelCase :Dict = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. lowerCamelCase :Dict = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. lowerCamelCase :Any = numpy.random.rand(3 , 1 ) # Real output values provided. lowerCamelCase :Union[str, Any] = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. lowerCamelCase :List[str] = numpy.zeros(output_array.shape ) def snake_case ( self : Optional[int] ): lowerCamelCase :Any = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. lowerCamelCase :Any = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. lowerCamelCase :Dict = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def snake_case ( self : Any ): lowerCamelCase :Union[str, Any] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) lowerCamelCase :Dict = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) lowerCamelCase :int = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ): for iteration in range(1 , iterations + 1 ): lowerCamelCase :Union[str, Any] = self.feedforward() self.back_propagation() if give_loss: 
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F"Iteration {iteration} Loss: {loss}" ) def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ): lowerCamelCase :int = input_arr lowerCamelCase :Union[str, Any] = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) lowerCamelCase :Optional[Any] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) lowerCamelCase :Optional[int] = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _lowerCamelCase ( a_ : numpy.ndarray): return 1 / (1 + numpy.exp(-value)) def _lowerCamelCase ( a_ : numpy.ndarray): return (value) * (1 - (value)) def _lowerCamelCase ( ): lowerCamelCase :Optional[Any] = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa) # Calling neural network class. lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork( input_array=a_ , output_array=a_) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=a_ , iterations=10 , give_loss=a_) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa)) if __name__ == "__main__": example()
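A minimal sketch, independent of the class above, that restates the two activation helpers and spot-checks the derivative identity s' = s * (1 - s) used throughout the backpropagation step (the array values are illustrative):

import numpy

def sigmoid(value):
    return 1 / (1 + numpy.exp(-value))

x = numpy.array([-1.0, 0.0, 1.0])
s = sigmoid(x)
# The logistic derivative s * (1 - s) peaks at 0.25 for x = 0
print(s * (1 - s))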
49
1
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : str ): lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :Optional[Any] = -1 lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :str = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase :str = TextStreamer(__snake_case ) model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase :Optional[int] = cs.out[:-1] self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : Dict ): lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :List[Any] = -1 lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] ) lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case ) lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case ) thread.start() lowerCamelCase :Any = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : str ): lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :List[str] = -1 lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :] lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case ) model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase :int = cs.out[:-1] self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : Optional[int] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case ) lowerCamelCase :Optional[int] = -1 lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case ) model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n" lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def snake_case ( self : List[Any] ): lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :Optional[int] = -1 lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 ) lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__snake_case ): lowerCamelCase :Dict = '''''' for new_text in streamer: streamer_text += new_text
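Outside of the test harness, the streamer usage exercised above reduces to a short sketch like the following (the tiny checkpoint name is reused from the tests; the prompt text is illustrative):

from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer(["An example prompt"], return_tensors="pt")
# Decoded tokens are printed to stdout as soon as they are generated
streamer = TextStreamer(tokenizer, skip_prompt=True)
model.generate(**inputs, streamer=streamer, max_new_tokens=10, do_sample=False)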
49
def abbreviation(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by uppercasing some of its
    lowercase letters and deleting the remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    # Match a[i] (uppercased if needed) against b[j]
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    # Lowercase letters may also be deleted
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
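Two sanity checks for the DP above; the expected answers follow from the rules in the docstring:

assert abbreviation("daBcd", "ABC")   # delete the d's, uppercase 'a' and 'c', keep 'B'
assert not abbreviation("abc", "ABD")  # 'c' can never become 'D'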
49
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = KandinskyInpaintPipeline _UpperCAmelCase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase = False @property def snake_case ( self : Tuple ): return 32 @property def snake_case ( self : str ): return 32 @property def snake_case ( self : List[Any] ): return self.time_input_dim @property def snake_case ( self : str ): return self.time_input_dim * 4 @property def snake_case ( self : Dict ): return 100 @property def snake_case ( self : List[Any] ): lowerCamelCase :str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def snake_case ( self : Union[str, Any] ): torch.manual_seed(0 ) lowerCamelCase :Any = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) lowerCamelCase :List[str] = MultilingualCLIP(__snake_case ) lowerCamelCase :List[Any] = text_encoder.eval() return text_encoder @property def snake_case ( self : str ): torch.manual_seed(0 ) lowerCamelCase :List[Any] = { '''in_channels''': 9, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } lowerCamelCase :Dict = UNetaDConditionModel(**__snake_case ) return model @property def snake_case ( self : List[Any] ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def snake_case ( self : Union[str, Any] ): torch.manual_seed(0 ) lowerCamelCase 
:str = VQModel(**self.dummy_movq_kwargs ) return model def snake_case ( self : Optional[Any] ): lowerCamelCase :Union[str, Any] = self.dummy_text_encoder lowerCamelCase :int = self.dummy_tokenizer lowerCamelCase :Optional[Any] = self.dummy_unet lowerCamelCase :str = self.dummy_movq lowerCamelCase :Optional[Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__snake_case , ) lowerCamelCase :Optional[int] = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def snake_case ( self : List[Any] , __snake_case : List[Any] , __snake_case : str=0 ): lowerCamelCase :Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case ) lowerCamelCase :Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case ) # create init_image lowerCamelCase :Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case ) lowerCamelCase :str = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase :Tuple = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((256, 256) ) # create mask lowerCamelCase :Tuple = np.ones((64, 64) , dtype=np.floataa ) lowerCamelCase :Tuple = 0 if str(__snake_case ).startswith('''mps''' ): lowerCamelCase :Tuple = torch.manual_seed(__snake_case ) else: lowerCamelCase :Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) lowerCamelCase :Optional[int] = { '''prompt''': '''horse''', '''image''': init_image, '''mask_image''': mask, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 2, '''guidance_scale''': 4.0, '''output_type''': '''np''', } return inputs def snake_case ( self : str ): lowerCamelCase :Dict = '''cpu''' lowerCamelCase :List[str] = self.get_dummy_components() lowerCamelCase :Tuple = self.pipeline_class(**__snake_case ) lowerCamelCase :str = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) lowerCamelCase :Optional[int] = pipe(**self.get_dummy_inputs(__snake_case ) ) lowerCamelCase :Optional[Any] = output.images lowerCamelCase :Tuple = pipe( **self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0] lowerCamelCase :Optional[Any] = image[0, -3:, -3:, -1] lowerCamelCase :List[Any] = image_from_tuple[0, -3:, -3:, -1] print(F"image.shape {image.shape}" ) assert image.shape == (1, 64, 64, 3) lowerCamelCase :Optional[Any] = np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def snake_case ( self : Dict ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def 
snake_case ( self : Optional[Any] ): lowerCamelCase :Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' ) lowerCamelCase :List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) lowerCamelCase :str = np.ones((768, 768) , dtype=np.floataa ) lowerCamelCase :Optional[int] = 0 lowerCamelCase :Dict = '''a hat''' lowerCamelCase :str = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__snake_case ) lowerCamelCase :Optional[int] = KandinskyInpaintPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa ) lowerCamelCase :List[str] = pipeline.to(__snake_case ) pipeline.set_progress_bar_config(disable=__snake_case ) lowerCamelCase :Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase , lowerCamelCase :Dict = pipe_prior( __snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() lowerCamelCase :Optional[int] = pipeline( __snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , ) lowerCamelCase :List[Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__snake_case , __snake_case )
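Stripped of the test scaffolding, the end-to-end inpainting call above reduces to roughly the following sketch. The model ids, sizes, and prompt are copied from the integration test; the blank init image and the mask are placeholders, and treating 0 in the mask as "region to repaint" is an assumption:

import numpy as np
import torch
from PIL import Image
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = Image.new("RGB", (768, 768))  # placeholder input image
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0  # assumed convention: 0 marks the area to repaint

image_embeds, negative_image_embeds = prior(
    "a hat", num_inference_steps=5, negative_prompt=""
).to_tuple()
image = pipe(
    "a hat",
    image=init_image,
    mask_image=mask,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    num_inference_steps=50,
    height=768,
    width=768,
).images[0]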
49
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ): lowerCamelCase :Optional[Any] = parent lowerCamelCase :List[Any] = batch_size lowerCamelCase :Any = image_size lowerCamelCase :Union[str, Any] = patch_size lowerCamelCase :Any = num_channels lowerCamelCase :List[Any] = is_training lowerCamelCase :Optional[Any] = use_labels lowerCamelCase :Any = hidden_size lowerCamelCase :List[Any] = num_hidden_layers lowerCamelCase :List[str] = num_attention_heads lowerCamelCase :Tuple = intermediate_size lowerCamelCase :List[str] = hidden_act lowerCamelCase :List[str] = hidden_dropout_prob lowerCamelCase :Any = attention_probs_dropout_prob lowerCamelCase :List[Any] = type_sequence_label_size lowerCamelCase :Optional[int] = initializer_range lowerCamelCase :List[Any] = num_labels lowerCamelCase :Any = scope lowerCamelCase :Union[str, Any] = n_targets lowerCamelCase :Optional[Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens def snake_case ( self : List[str] ): lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowerCamelCase :List[str] = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowerCamelCase :Optional[int] = [] for i in range(self.batch_size ): lowerCamelCase :List[str] = {} lowerCamelCase :Tuple = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=__snake_case ) lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case ) labels.append(__snake_case ) lowerCamelCase :str = self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] ): return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ): lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :Union[str, Any] = model(__snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ): lowerCamelCase :int = YolosForObjectDetection(__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :str = model(pixel_values=__snake_case ) lowerCamelCase :Any = model(__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def snake_case ( self : int ): lowerCamelCase :List[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () _UpperCAmelCase = ( {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ): lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowerCamelCase :Dict = [] for i in range(self.model_tester.batch_size ): lowerCamelCase :Optional[Any] = {} lowerCamelCase :List[Any] = torch.ones( size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long ) lowerCamelCase :str = torch.ones( self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float ) labels.append(__snake_case ) lowerCamelCase :Union[str, Any] = labels return inputs_dict def snake_case ( self : Tuple ): lowerCamelCase :Union[str, Any] = YolosModelTester(self ) lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 ) def snake_case ( self : Union[str, Any] ): self.config_tester.run_common_tests() def snake_case ( self : Optional[Any] ): # YOLOS does not use inputs_embeds pass def snake_case ( self : Tuple ): lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :Optional[int] = model_class(__snake_case ) 
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase :str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) ) def snake_case ( self : str ): lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :str = model_class(__snake_case ) lowerCamelCase :Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase :Tuple = [*signature.parameters.keys()] lowerCamelCase :Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __snake_case ) def snake_case ( self : int ): lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def snake_case ( self : str ): lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase :int = True # in YOLOS, the seq_len is different lowerCamelCase :str = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowerCamelCase :str = True lowerCamelCase :Tuple = False lowerCamelCase :Optional[int] = True lowerCamelCase :int = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :str = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase :Optional[Any] = True lowerCamelCase :str = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Tuple = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowerCamelCase :Optional[int] = len(__snake_case ) # Check attention is always last and order is fine lowerCamelCase :Union[str, Any] = True lowerCamelCase :List[Any] = True lowerCamelCase :Tuple = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Dict = 1 self.assertEqual(out_len + added_hidden_states , len(__snake_case ) ) lowerCamelCase :Dict = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def snake_case ( self : List[str] ): def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ): lowerCamelCase :Union[str, Any] = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Optional[Any] = outputs.hidden_states lowerCamelCase :Any = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__snake_case ) , __snake_case ) # YOLOS has a different seq_length lowerCamelCase :List[str] = self.model_tester.expected_seq_len self.assertListEqual( 
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :Union[str, Any] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase :Any = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*__snake_case ) @slow def snake_case ( self : Dict ): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def _lowerCamelCase ( ): lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): @cached_property def snake_case ( self : Tuple ): return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None @slow def snake_case ( self : Dict ): lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case ) lowerCamelCase :Optional[Any] = self.default_image_processor lowerCamelCase :str = prepare_img() lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case ) # forward pass with torch.no_grad(): lowerCamelCase :Optional[Any] = model(inputs.pixel_values ) # verify outputs lowerCamelCase :int = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , __snake_case ) lowerCamelCase :Any = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , ) lowerCamelCase :Any = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) ) # verify postprocessing lowerCamelCase :List[str] = image_processor.post_process_object_detection( __snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case ) lowerCamelCase :str = [75, 75, 17, 63, 17] lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case ) self.assertEqual(len(results['''scores'''] ) , 5 ) self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) ) self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case ) self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
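The integration test above corresponds to the following plain inference sketch; the checkpoint, fixture path, and threshold are all copied from the test:

import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw logits/boxes into thresholded detections in image coordinates
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
print(results["scores"], results["labels"], results["boxes"])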
49
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() A__ = logging.get_logger(__name__) A__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } A__ = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCamelCase ( a_ : Optional[int] , a_ : List[Any] , a_ : str , a_ : Optional[Any] , a_ : Optional[Any] , a_ : str): for attribute in key.split('''.'''): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowerCamelCase :str = '''lm_head''' lowerCamelCase :Union[str, Any] = getattr(a_ , a_) if weight_type is not None: lowerCamelCase :List[Any] = getattr(a_ , a_).shape else: lowerCamelCase :Any = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": lowerCamelCase :Dict = value elif weight_type == "weight_g": lowerCamelCase :Tuple = value elif weight_type == "weight_v": lowerCamelCase :Optional[Any] = value elif weight_type == "bias": lowerCamelCase :Optional[int] = value else: lowerCamelCase :str = value logger.info(F"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.") def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Dict , a_ : Any): lowerCamelCase :Any = [] lowerCamelCase :Tuple = fairseq_model.state_dict() lowerCamelCase :str = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase :Dict = False if "conv_layers" in name: load_conv_layer( a_ , a_ , a_ , a_ , hf_model.config.feat_extract_norm == '''group''' , ) lowerCamelCase :str = True else: for key, mapped_key in MAPPING.items(): lowerCamelCase :List[str] = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]: lowerCamelCase :List[Any] = True if "*" in mapped_key: lowerCamelCase :Union[str, Any] = name.split(a_)[0].split('''.''')[-2] lowerCamelCase :List[Any] = mapped_key.replace('''*''' , a_) if "weight_g" in name: lowerCamelCase :List[Any] = '''weight_g''' elif "weight_v" in name: lowerCamelCase :Union[str, Any] = '''weight_v''' elif "bias" in name: lowerCamelCase :Union[str, Any] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase :Dict = '''weight''' else: lowerCamelCase :Optional[Any] = None set_recursively(a_ , a_ , a_ , a_ , a_ , a_) continue if not is_used: unused_weights.append(a_) logger.warning(F"Unused weights: {unused_weights}") def _lowerCamelCase ( a_ : Union[str, Any] , a_ : List[str] , a_ : Dict , a_ : Optional[Any] , a_ : Union[str, Any]): lowerCamelCase :Optional[Any] = full_name.split('''conv_layers.''')[-1] lowerCamelCase :List[str] = name.split('''.''') lowerCamelCase :Dict = int(items[0]) lowerCamelCase :Union[str, Any] = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) lowerCamelCase :List[str] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) lowerCamelCase :Optional[int] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) lowerCamelCase :int = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) lowerCamelCase :Optional[int] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(a_) @torch.no_grad() def _lowerCamelCase ( a_ : Tuple , a_ : Optional[int] , a_ : Any=None , a_ : Dict=None , a_ : List[Any]=True): if config_path is not None: lowerCamelCase :Dict = UniSpeechConfig.from_pretrained(a_) else: lowerCamelCase :Tuple = UniSpeechConfig() if is_finetuned: if dict_path: lowerCamelCase :Any = Dictionary.load_from_json(a_) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCamelCase :List[str] = target_dict.pad_index lowerCamelCase :Dict = target_dict.bos_index lowerCamelCase :Optional[int] = target_dict.eos_index lowerCamelCase :Any = len(target_dict.symbols) lowerCamelCase :List[str] = os.path.join(a_ , '''vocab.json''') if not os.path.isdir(a_): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a_)) return os.makedirs(a_ , exist_ok=a_) lowerCamelCase :str = target_dict.indices # fairseq has the <pad> and <s> switched lowerCamelCase :Optional[int] = 42 lowerCamelCase :Any = 43 with open(a_ , '''w''' , encoding='''utf-8''') as vocab_handle: json.dump(a_ , a_) lowerCamelCase :Optional[Any] = WavaVecaPhonemeCTCTokenizer( a_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=a_ , ) lowerCamelCase :Optional[int] = True if config.feat_extract_norm == '''layer''' else False lowerCamelCase :int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a_ , return_attention_mask=a_ , ) lowerCamelCase :Dict = WavaVecaProcessor(feature_extractor=a_ , tokenizer=a_) processor.save_pretrained(a_) lowerCamelCase :int = UniSpeechForCTC(a_) else: lowerCamelCase :Optional[Any] = UniSpeechForPreTraining(a_) if is_finetuned: lowerCamelCase , lowerCamelCase , lowerCamelCase :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1]), '''w2v_path''': checkpoint_path}) else: lowerCamelCase , lowerCamelCase , lowerCamelCase :str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) lowerCamelCase :Union[str, Any] = model[0].eval() recursively_load_weights(a_ , a_ , a_) hf_unispeech.save_pretrained(a_) if __name__ == "__main__": A__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) A__ = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
49
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Tuple ): lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils ) lowerCamelCase :Dict = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 lowerCamelCase :Any = test_metrics @require_cpu def snake_case ( self : Dict ): debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def snake_case ( self : int ): debug_launcher(self.test_metrics.main ) @require_single_gpu def snake_case ( self : Any ): self.test_metrics.main() @require_multi_gpu def snake_case ( self : Optional[int] ): print(F"Found {torch.cuda.device_count()} devices." ) lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__snake_case , env=os.environ.copy() )
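For reference, `debug_launcher` can also be driven directly; a minimal sketch with a stand-in function (the function body is purely illustrative):

from accelerate import debug_launcher

def training_function():
    # Stand-in for a real training loop; runs once per simulated process
    print("launched")

# Runs the function under a lightweight multi-process launcher on CPU
debug_launcher(training_function, num_processes=2)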
49
1
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = LEDTokenizer _UpperCAmelCase = LEDTokenizerFast _UpperCAmelCase = True def snake_case ( self : Any ): super().setUp() lowerCamelCase :Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCamelCase :int = {'''unk_token''': '''<unk>'''} lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__snake_case ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__snake_case ) ) def snake_case ( self : int , **__snake_case : int ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Dict , **__snake_case : Any ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ): return "lower newer", "lower newer" @cached_property def snake_case ( self : Any ): return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def snake_case ( self : int ): return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def snake_case ( self : str ): lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowerCamelCase :List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(__snake_case , __snake_case ) @require_torch def snake_case ( self : Tuple ): lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' ) self.assertIn('''input_ids''' , __snake_case ) self.assertIn('''attention_mask''' , __snake_case ) self.assertNotIn('''labels''' , __snake_case ) self.assertNotIn('''decoder_attention_mask''' , __snake_case ) @require_torch def snake_case ( self : Union[str, Any] ): 
lowerCamelCase :Union[str, Any] = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def snake_case ( self : List[Any] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Optional[Any] = tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def snake_case ( self : Optional[int] ): lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.'''] lowerCamelCase :Any = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' ) lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' ) lowerCamelCase :Optional[int] = inputs['''input_ids'''] lowerCamelCase :Any = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def snake_case ( self : Dict ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.'''] lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case ) lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']] lowerCamelCase :str = tokenizer.pad(__snake_case ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case ) def snake_case ( self : Tuple ): pass def snake_case ( self : Optional[int] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :int = '''A, <mask> AllenNLP sentence.''' lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case ) lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 
487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
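The round-trip these tests exercise looks like this in plain usage (the checkpoint name is reused from the tests; the sentence is illustrative):

from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
batch = tokenizer(["A long paragraph for summarization."], return_tensors="pt")
print(batch["input_ids"].shape)
print(tokenizer.decode(batch["input_ids"][0], skip_special_tokens=True))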
49
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = '' _UpperCAmelCase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _UpperCAmelCase = None # compression type in fsspec. ex: "gzip" _UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ): super().__init__(self , **__snake_case ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode lowerCamelCase :Optional[Any] = fsspec.open( __snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={ '''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459 '''trust_env''': True, # Enable reading proxy env variables. **(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] ) lowerCamelCase :Dict = ( self.compressed_name[: self.compressed_name.rindex('''.''' )] if '''.''' in self.compressed_name else self.compressed_name ) lowerCamelCase :List[str] = None @classmethod def snake_case ( cls : Any , __snake_case : Any ): # compressed file paths are always relative to the archive root return super()._strip_protocol(__snake_case ).lstrip('''/''' ) def snake_case ( self : Any ): if self.dir_cache is None: lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name} lowerCamelCase :Optional[Any] = {f['''name''']: f} def snake_case ( self : Union[str, Any] , __snake_case : str ): return self.file.open().read() def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ): lowerCamelCase :List[str] = self._strip_protocol(__snake_case ) if mode != "rb": raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" ) return self.file.open() class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'bz2' _UpperCAmelCase = 'bz2' _UpperCAmelCase = '.bz2' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'gzip' _UpperCAmelCase = 'gzip' _UpperCAmelCase = '.gz' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'lz4' _UpperCAmelCase = 'lz4' _UpperCAmelCase = '.lz4' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'xz' _UpperCAmelCase = 'xz' _UpperCAmelCase = '.xz' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'zstd' _UpperCAmelCase = 'zstd' _UpperCAmelCase = '.zst' def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ): super().__init__( fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File 
"/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 lowerCamelCase :Tuple = self.file.__enter__ class _lowerCAmelCase : def __init__( self : Dict , __snake_case : Tuple ): lowerCamelCase :Optional[int] = file_ def __enter__( self : Optional[int] ): self._file.__enter__() return self def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ): self._file.__exit__(*__snake_case , **__snake_case ) def __iter__( self : Optional[Any] ): return iter(self._file ) def snake_case ( self : List[Any] ): return next(self._file ) def __getattr__( self : Any , __snake_case : str ): return getattr(self._file , __snake_case ) def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ): return WrappedFile(_enter(*__snake_case , **__snake_case ) ) lowerCamelCase :Dict = fixed_enter
49
1
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)

from ...test_tokenization_common import TokenizerTesterMixin


A__ = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250_004
RO_CODE = 250_020


@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    _UpperCAmelCase = MBartaaTokenizer
    _UpperCAmelCase = MBartaaTokenizerFast
    _UpperCAmelCase = True
    _UpperCAmelCase = True

    def snake_case ( self : Union[str, Any] ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase :Optional[int] = MBartaaTokenizer(__snake_case , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__snake_case )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self : Dict ):
        lowerCamelCase :int = '''<s>'''
        lowerCamelCase :int = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )

    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(__snake_case ) , 1054 )

    def snake_case ( self : Dict ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1054 )

    def snake_case ( self : List[Any] ):
        lowerCamelCase :List[Any] = MBartaaTokenizer(__snake_case , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__snake_case )
        lowerCamelCase :List[str] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__snake_case ) ,
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,
        )
        lowerCamelCase :List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __snake_case ,
            [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] ,
        )
        lowerCamelCase :Tuple = tokenizer.convert_tokens_to_ids(__snake_case )
        self.assertListEqual(
            __snake_case ,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] ,
        )
        lowerCamelCase :str = tokenizer.convert_ids_to_tokens(__snake_case )
        self.assertListEqual(
            __snake_case ,
            [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] ,
        )

    @slow
    def snake_case ( self : Union[str, Any] ):
        # fmt: off
        lowerCamelCase :Optional[Any] = {
            '''input_ids''': [
                [250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2],  # noqa: E501
                [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2] + [1] * 68,  # right padding  # noqa: E501
                [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2] + [1] * 91,  # right padding
            ],
            '''attention_mask''': [
                [1] * 106,
                [1] * 38 + [0] * 68,
                [1] * 15 + [0] * 91,
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case ,
            model_name='''facebook/mbart-large-50''' ,
            revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' ,
        )

    def snake_case ( self : Optional[Any] ):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        lowerCamelCase :Optional[Any] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                lowerCamelCase :Tuple = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
                lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )

                lowerCamelCase :int = tempfile.mkdtemp()

                lowerCamelCase :Union[str, Any] = tokenizer_r.save_pretrained(__snake_case )
                lowerCamelCase :List[Any] = tokenizer_p.save_pretrained(__snake_case )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase :Union[str, Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(__snake_case , __snake_case )

                # Checks everything loads correctly in the same way
                lowerCamelCase :Any = tokenizer_r.from_pretrained(__snake_case )
                lowerCamelCase :Any = tokenizer_p.from_pretrained(__snake_case )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__snake_case , __snake_case ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(__snake_case )

                # Save tokenizer rust, legacy_format=True
                lowerCamelCase :Union[str, Any] = tempfile.mkdtemp()

                lowerCamelCase :List[Any] = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
                lowerCamelCase :List[str] = tokenizer_p.save_pretrained(__snake_case )

                # Checks it save with the same files
                self.assertSequenceEqual(__snake_case , __snake_case )

                # Checks everything loads correctly in the same way
                lowerCamelCase :Union[str, Any] = tokenizer_r.from_pretrained(__snake_case )
                lowerCamelCase :Tuple = tokenizer_p.from_pretrained(__snake_case )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__snake_case , __snake_case ) )

                shutil.rmtree(__snake_case )

                # Save tokenizer rust, legacy_format=False
                lowerCamelCase :Optional[Any] = tempfile.mkdtemp()

                lowerCamelCase :Optional[Any] = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
                lowerCamelCase :Optional[Any] = tokenizer_p.save_pretrained(__snake_case )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                lowerCamelCase :Dict = tokenizer_r.from_pretrained(__snake_case )
                lowerCamelCase :List[Any] = tokenizer_p.from_pretrained(__snake_case )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__snake_case , __snake_case ) )

                shutil.rmtree(__snake_case )


@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
    _UpperCAmelCase = 'facebook/mbart-large-50-one-to-many-mmt'
    _UpperCAmelCase = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _UpperCAmelCase = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _UpperCAmelCase = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]

    @classmethod
    def snake_case ( cls : Any ):
        lowerCamelCase :MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        lowerCamelCase :str = 1
        return cls

    def snake_case ( self : Union[str, Any] ):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250038 )

    def snake_case ( self : Optional[int] ):
        lowerCamelCase :Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __snake_case )

    def snake_case ( self : Optional[int] ):
        self.assertIn(__snake_case , self.tokenizer.all_special_ids )
        lowerCamelCase :Tuple = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        lowerCamelCase :List[str] = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
        lowerCamelCase :Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
        self.assertEqual(__snake_case , __snake_case )
        self.assertNotIn(self.tokenizer.eos_token , __snake_case )

    def snake_case ( self : Tuple ):
        lowerCamelCase :Any = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , __snake_case )
        lowerCamelCase :Any = 10
        lowerCamelCase :List[str] = self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
        self.assertEqual(ids[0] , __snake_case )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(__snake_case ) , __snake_case )

    def snake_case ( self : Dict ):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250053, 250001] )

    def snake_case ( self : Any ):
        lowerCamelCase :Any = tempfile.mkdtemp()
        lowerCamelCase :List[Any] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__snake_case )
        lowerCamelCase :List[str] = MBartaaTokenizer.from_pretrained(__snake_case )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )

    @require_torch
    def snake_case ( self : List[Any] ):
        lowerCamelCase :Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__snake_case , return_tensors='''pt''' )
        lowerCamelCase :Optional[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :Union[str, Any] = self.tokenizer(
            self.src_text ,
            text_target=self.tgt_text ,
            padding=__snake_case ,
            truncation=__snake_case ,
            max_length=len(self.expected_src_tokens ) ,
            return_tensors='''pt''' ,
        )
        lowerCamelCase :Any = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )

        self.assertIsInstance(__snake_case , __snake_case )

        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        lowerCamelCase :Union[str, Any] = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __snake_case )
        self.assertEqual(2 , batch.decoder_input_ids[0, 0] )  # decoder_start_token_id

        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    def snake_case ( self : Optional[Any] ):
        lowerCamelCase :int = self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='''pt''' )
        lowerCamelCase :Tuple = self.tokenizer(
            text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='''pt''' )
        lowerCamelCase :Optional[Any] = targets['''input_ids''']
        lowerCamelCase :List[str] = shift_tokens_right(__snake_case , self.tokenizer.pad_token_id )

        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def snake_case ( self : List[str] ):
        lowerCamelCase :Tuple = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )

        self.assertEqual(
            nested_simplify(__snake_case ) ,
            {
                # en_XX, A, test, EOS
                '''input_ids''': [[250004, 62, 3034, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 250001,
            } ,
        )
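# ------------------------------------------------------------------
# Illustration only: the "language code as prefix, EOS as suffix"
# contract exercised by the assertions above, reduced to a pure-Python
# sketch. The ids (250004 for en_XX, 2 for </s>) are taken from the
# constants used in these tests; this is not the real tokenizer code.
# ------------------------------------------------------------------
def sketch_build_translation_inputs(token_ids, lang_code_id=250_004, eos_id=2):
    """Prepend the source language code and append EOS."""
    return [lang_code_id, *token_ids, eos_id]


# Mirrors the expected encoding of "A test" checked above.
assert sketch_build_translation_inputs([62, 3034]) == [250_004, 62, 3034, 2]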
49
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    _UpperCAmelCase = LEDTokenizer
    _UpperCAmelCase = LEDTokenizerFast
    _UpperCAmelCase = True

    def snake_case ( self : Any ):
        super().setUp()
        lowerCamelCase :Optional[int] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
        lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowerCamelCase :int = {'''unk_token''': '''<unk>'''}

        lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__snake_case ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__snake_case ) )

    def snake_case ( self : int , **__snake_case : int ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )

    def snake_case ( self : Dict , **__snake_case : Any ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )

    def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
        return "lower newer", "lower newer"

    @cached_property
    def snake_case ( self : Any ):
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )

    @cached_property
    def snake_case ( self : int ):
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )

    @require_torch
    def snake_case ( self : str ):
        lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
            self.assertIsInstance(__snake_case , __snake_case )

            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(__snake_case , __snake_case )

    @require_torch
    def snake_case ( self : Tuple ):
        lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
            self.assertIn('''input_ids''' , __snake_case )
            self.assertIn('''attention_mask''' , __snake_case )
            self.assertNotIn('''labels''' , __snake_case )
            self.assertNotIn('''decoder_attention_mask''' , __snake_case )

    @require_torch
    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :Union[str, Any] = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    @require_torch
    def snake_case ( self : List[Any] ):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Optional[Any] = tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
            self.assertIsInstance(__snake_case , __snake_case )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )

    @require_torch
    def snake_case ( self : Optional[int] ):
        lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
        lowerCamelCase :Any = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
            lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
            lowerCamelCase :Optional[int] = inputs['''input_ids''']
            lowerCamelCase :Any = targets['''input_ids''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def snake_case ( self : Dict ):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
            lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
            lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
            lowerCamelCase :str = tokenizer.pad(__snake_case )
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )

    def snake_case ( self : Tuple ):
        pass

    def snake_case ( self : Optional[int] ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
                lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
                lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
                lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
                lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )

                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,
                    sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) ,
                )

                lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )

                self.assertSequenceEqual(
                    __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
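# Illustration only: LED pads '''global_attention_mask''' with -1 rather
# than 0, which is what the [0, 0, 0, 0, 0, -1, -1] expectation above
# encodes. A hypothetical pure-Python helper showing that padding rule:
def sketch_pad_global_attention(masks, pad_value=-1):
    longest = max(len(m) for m in masks)
    return [m + [pad_value] * (longest - len(m)) for m in masks]


assert sketch_pad_global_attention([[0] * 7, [0] * 5]) == [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, -1, -1],
]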
49
1
def _lowerCamelCase ( a_ : int = 4_00_00_00):
    lowerCamelCase :Dict = [0, 1]
    lowerCamelCase :Optional[Any] = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    lowerCamelCase :Dict = 0
    for j in range(len(a_) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total


if __name__ == "__main__":
    print(F'{solution() = }')
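# A readable reference implementation of the same Project Euler task
# (sum of the even Fibonacci numbers not exceeding n), handy for
# sanity-checking the obfuscated version above:
def even_fib_sum(n: int = 4_000_000) -> int:
    total, a, b = 0, 0, 1
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total


assert even_fib_sum(10) == 10  # 2 + 8
assert even_fib_sum(4_000_000) == 4_613_732  # the well-known answer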
49
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


A__ = {
    """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
    """processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
    """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = ["""LayoutLMv2TokenizerFast"""]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = ["""LayoutLMv2FeatureExtractor"""]
    A__ = ["""LayoutLMv2ImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = [
        """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LayoutLMv2ForQuestionAnswering""",
        """LayoutLMv2ForSequenceClassification""",
        """LayoutLMv2ForTokenClassification""",
        """LayoutLMv2Layer""",
        """LayoutLMv2Model""",
        """LayoutLMv2PreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

else:
    import sys

    A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
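# Illustration only: the deferred-import idea behind _LazyModule above,
# as a much smaller hypothetical stand-in (not the transformers
# implementation). Submodules are imported on first attribute access:
import importlib
import types


class SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)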
49
1
from ..utils import DummyObject, requires_backends


class _lowerCAmelCase ( metaclass=DummyObject ):
    _UpperCAmelCase = ['torch', 'transformers', 'onnx']

    def __init__( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : str ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : Union[str, Any] , *__snake_case : List[str] , **__snake_case : List[str] ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : Tuple , *__snake_case : Dict , **__snake_case : Tuple ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class _lowerCAmelCase ( metaclass=DummyObject ):
    _UpperCAmelCase = ['torch', 'transformers', 'onnx']

    def __init__( self : List[Any] , *__snake_case : Any , **__snake_case : List[Any] ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : Any , *__snake_case : str , **__snake_case : int ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : Any , *__snake_case : Any , **__snake_case : Tuple ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class _lowerCAmelCase ( metaclass=DummyObject ):
    _UpperCAmelCase = ['torch', 'transformers', 'onnx']

    def __init__( self : int , *__snake_case : Dict , **__snake_case : List[str] ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : str , *__snake_case : Tuple , **__snake_case : Any ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : Any , *__snake_case : str , **__snake_case : Any ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class _lowerCAmelCase ( metaclass=DummyObject ):
    _UpperCAmelCase = ['torch', 'transformers', 'onnx']

    def __init__( self : Tuple , *__snake_case : Any , **__snake_case : Union[str, Any] ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : Any , *__snake_case : Union[str, Any] , **__snake_case : Union[str, Any] ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : int , *__snake_case : Dict , **__snake_case : Union[str, Any] ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class _lowerCAmelCase ( metaclass=DummyObject ):
    _UpperCAmelCase = ['torch', 'transformers', 'onnx']

    def __init__( self : Any , *__snake_case : Any , **__snake_case : str ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : Any , *__snake_case : Tuple , **__snake_case : Dict ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : Optional[Any] , *__snake_case : Dict , **__snake_case : str ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class _lowerCAmelCase ( metaclass=DummyObject ):
    _UpperCAmelCase = ['torch', 'transformers', 'onnx']

    def __init__( self : List[str] , *__snake_case : str , **__snake_case : int ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : int , *__snake_case : Tuple , **__snake_case : Optional[Any] ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def snake_case ( cls : Dict , *__snake_case : int , **__snake_case : str ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
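# Illustration only: what the DummyObject placeholders above accomplish.
# A hypothetical requires_backends look-alike that raises an informative
# ImportError when a backend is missing (names are made up for the demo):
def sketch_requires_backends(obj, backends, available=frozenset()):
    missing = [b for b in backends if b not in available]
    if missing:
        name = getattr(obj, "__name__", type(obj).__name__)
        raise ImportError(f"{name} requires the missing backends: {', '.join(missing)}")


try:
    sketch_requires_backends(type("SomeOnnxPipeline", (), {}), ["torch", "transformers", "onnx"])
except ImportError as err:
    print(err)  # SomeOnnxPipeline requires the missing backends: torch, transformers, onnx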
49
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class _lowerCAmelCase :
        @staticmethod
        def snake_case ( *__snake_case : str , **__snake_case : str ):
            pass


@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    @require_torch
    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :Optional[int] = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(__snake_case ) ,
            [
                [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
                [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
            ] ,
        )

        lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
            ] ,
        )

    @require_tf
    def snake_case ( self : Tuple ):
        lowerCamelCase :Tuple = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )

        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] ,
        )

        lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
            ] ,
        )

    @slow
    @require_torch
    def snake_case ( self : Any ):
        lowerCamelCase :str = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )

        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                {'''score''': 0.5_1_1, '''label''': '''remote'''},
                {'''score''': 0.4_8_5, '''label''': '''cat'''},
                {'''score''': 0.0_0_4, '''label''': '''plane'''},
            ] ,
        )

        lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                [
                    {'''score''': 0.5_1_1, '''label''': '''remote'''},
                    {'''score''': 0.4_8_5, '''label''': '''cat'''},
                    {'''score''': 0.0_0_4, '''label''': '''plane'''},
                ],
            ]
            * 5 ,
        )

    @slow
    @require_tf
    def snake_case ( self : Optional[Any] ):
        lowerCamelCase :Union[str, Any] = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                {'''score''': 0.5_1_1, '''label''': '''remote'''},
                {'''score''': 0.4_8_5, '''label''': '''cat'''},
                {'''score''': 0.0_0_4, '''label''': '''plane'''},
            ] ,
        )

        lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                [
                    {'''score''': 0.5_1_1, '''label''': '''remote'''},
                    {'''score''': 0.4_8_5, '''label''': '''cat'''},
                    {'''score''': 0.0_0_4, '''label''': '''plane'''},
                ],
            ]
            * 5 ,
        )
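# Illustration only: why the tiny random checkpoint above scores every
# candidate label at ~0.333. Zero-shot image classification softmaxes the
# image-text similarity logits over the candidate labels, so near-equal
# logits yield a near-uniform distribution:
import math


def sketch_softmax(logits):
    exps = [math.exp(value) for value in logits]
    total = sum(exps)
    return [e / total for e in exps]


print(sketch_softmax([0.01, 0.0, -0.01]))  # three nearly equal probabilities summing to 1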
49
1
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    A__ = """platform"""

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )


def _lowerCamelCase ( a_ : str , a_ : Union[str, Any] , a_ : List[str]=None , a_ : Optional[Any]=None , a_ : List[str]=None , a_ : List[str]=None , a_ : Optional[Any]=None , a_ : int=None , ):
    if attention_mask is None:
        lowerCamelCase :Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0)
    if decoder_attention_mask is None:
        lowerCamelCase :Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0)
    if head_mask is None:
        lowerCamelCase :List[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        lowerCamelCase :List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        lowerCamelCase :Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class _lowerCAmelCase :
    def __init__( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any]=13 , __snake_case : Any=7 , __snake_case : List[str]=True , __snake_case : Any=False , __snake_case : Optional[int]=99 , __snake_case : List[Any]=16 , __snake_case : str=2 , __snake_case : List[Any]=4 , __snake_case : List[Any]=4 , __snake_case : Tuple="gelu" , __snake_case : str=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=32 , __snake_case : int=2 , __snake_case : Optional[int]=1 , __snake_case : str=0 , __snake_case : str=0.0_2 , ):
        lowerCamelCase :Optional[Any] = parent
        lowerCamelCase :Any = batch_size
        lowerCamelCase :str = seq_length
        lowerCamelCase :Optional[Any] = is_training
        lowerCamelCase :Tuple = use_labels
        lowerCamelCase :List[str] = vocab_size
        lowerCamelCase :Any = hidden_size
        lowerCamelCase :str = num_hidden_layers
        lowerCamelCase :Any = num_attention_heads
        lowerCamelCase :Tuple = intermediate_size
        lowerCamelCase :List[Any] = hidden_act
        lowerCamelCase :Tuple = hidden_dropout_prob
        lowerCamelCase :Dict = attention_probs_dropout_prob
        lowerCamelCase :List[Any] = max_position_embeddings
        lowerCamelCase :List[str] = eos_token_id
        lowerCamelCase :Tuple = pad_token_id
        lowerCamelCase :Tuple = bos_token_id
        lowerCamelCase :int = initializer_range

    def snake_case ( self : str ):
        lowerCamelCase :str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        lowerCamelCase :Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        lowerCamelCase :List[Any] = shift_tokens_right(__snake_case , 1 , 2 )

        lowerCamelCase :Optional[int] = BlenderbotSmallConfig(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_id=self.eos_token_id ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            initializer_range=self.initializer_range ,
            use_cache=__snake_case ,
        )
        lowerCamelCase :Dict = prepare_blenderbot_inputs_dict(__snake_case , __snake_case , __snake_case )
        return config, inputs_dict

    def snake_case ( self : Optional[Any] ):
        lowerCamelCase , lowerCamelCase :Optional[int] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def snake_case ( self : List[str] , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Union[str, Any] ):
        lowerCamelCase :Tuple = 20
        lowerCamelCase :Union[str, Any] = model_class_name(__snake_case )

        lowerCamelCase :Optional[Any] = model.encode(inputs_dict['''input_ids'''] )

        lowerCamelCase , lowerCamelCase :Dict = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )

        lowerCamelCase :Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
        lowerCamelCase :Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )

        lowerCamelCase :int = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,
        )
        lowerCamelCase :Optional[Any] = model.decode(
            decoder_input_ids[:, :-1] ,
            __snake_case ,
            decoder_attention_mask=__snake_case ,
            past_key_values=__snake_case ,
            decoder_position_ids=__snake_case ,
        )

        lowerCamelCase :Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        lowerCamelCase :str = model.decode(
            decoder_input_ids[:, -1:] ,
            __snake_case ,
            decoder_attention_mask=__snake_case ,
            past_key_values=outputs_cache.past_key_values ,
            decoder_position_ids=__snake_case ,
        )

        lowerCamelCase :Optional[int] = model.decode(__snake_case , __snake_case )

        lowerCamelCase :Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )

    def snake_case ( self : Optional[int] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Tuple ):
        lowerCamelCase :List[str] = 20
        lowerCamelCase :Union[str, Any] = model_class_name(__snake_case )

        lowerCamelCase :Any = model.encode(inputs_dict['''input_ids'''] )

        lowerCamelCase , lowerCamelCase :Optional[int] = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )

        lowerCamelCase :str = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] ,
            axis=-1 ,
        )

        lowerCamelCase :Optional[int] = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
        lowerCamelCase :Any = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,
        )

        lowerCamelCase :int = model.decode(
            decoder_input_ids[:, :-1] ,
            __snake_case ,
            decoder_attention_mask=__snake_case ,
            past_key_values=__snake_case ,
            decoder_position_ids=__snake_case ,
        )
        lowerCamelCase :Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        lowerCamelCase :int = model.decode(
            decoder_input_ids[:, -1:] ,
            __snake_case ,
            past_key_values=outputs_cache.past_key_values ,
            decoder_attention_mask=__snake_case ,
            decoder_position_ids=__snake_case ,
        )

        lowerCamelCase :str = model.decode(__snake_case , __snake_case , decoder_attention_mask=__snake_case )

        lowerCamelCase :Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )


@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
    _UpperCAmelCase = 9_9

    def snake_case ( self : Optional[Any] ):
        lowerCamelCase :Dict = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] ,
            dtype=np.intaa ,
        )

        lowerCamelCase :Union[str, Any] = input_ids.shape[0]
        lowerCamelCase :Any = BlenderbotSmallConfig(
            vocab_size=self.vocab_size ,
            d_model=24 ,
            encoder_layers=2 ,
            decoder_layers=2 ,
            encoder_attention_heads=2 ,
            decoder_attention_heads=2 ,
            encoder_ffn_dim=32 ,
            decoder_ffn_dim=32 ,
            max_position_embeddings=48 ,
            eos_token_id=2 ,
            pad_token_id=1 ,
            bos_token_id=0 ,
        )
        return config, input_ids, batch_size

    def snake_case ( self : List[str] ):
        lowerCamelCase , lowerCamelCase , lowerCamelCase :List[Any] = self._get_config_and_data()
        lowerCamelCase :str = FlaxBlenderbotSmallForConditionalGeneration(__snake_case )
        lowerCamelCase :Any = lm_model(input_ids=__snake_case )
        lowerCamelCase :Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , __snake_case )

    def snake_case ( self : Optional[Any] ):
        lowerCamelCase :int = BlenderbotSmallConfig(
            vocab_size=self.vocab_size ,
            d_model=14 ,
            encoder_layers=2 ,
            decoder_layers=2 ,
            encoder_attention_heads=2 ,
            decoder_attention_heads=2 ,
            encoder_ffn_dim=8 ,
            decoder_ffn_dim=8 ,
            max_position_embeddings=48 ,
        )
        lowerCamelCase :Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(__snake_case )
        lowerCamelCase :List[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        lowerCamelCase :List[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        lowerCamelCase :List[str] = lm_model(input_ids=__snake_case , decoder_input_ids=__snake_case )
        lowerCamelCase :Tuple = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , __snake_case )

    def snake_case ( self : Dict ):
        lowerCamelCase :Optional[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        lowerCamelCase :Union[str, Any] = shift_tokens_right(__snake_case , 1 , 2 )
        lowerCamelCase :Dict = np.equal(__snake_case , 1 ).astype(np.floataa ).sum()
        lowerCamelCase :str = np.equal(__snake_case , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(__snake_case , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )


@require_flax
class _lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    _UpperCAmelCase = True
    _UpperCAmelCase = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    _UpperCAmelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def snake_case ( self : int ):
        lowerCamelCase :Optional[int] = FlaxBlenderbotSmallModelTester(self )

    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase , lowerCamelCase :Any = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(__snake_case , __snake_case , __snake_case )

    def snake_case ( self : str ):
        lowerCamelCase , lowerCamelCase :Any = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(__snake_case , __snake_case , __snake_case )

    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase :Tuple = self._prepare_for_class(__snake_case , __snake_case )
                lowerCamelCase :Optional[int] = model_class(__snake_case )

                @jax.jit
                def encode_jitted(__snake_case : Tuple , __snake_case : Optional[int]=None , **__snake_case : Optional[Any] ):
                    return model.encode(input_ids=__snake_case , attention_mask=__snake_case )

                with self.subTest('''JIT Enabled''' ):
                    lowerCamelCase :Optional[int] = encode_jitted(**__snake_case ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowerCamelCase :Any = encode_jitted(**__snake_case ).to_tuple()

                self.assertEqual(len(__snake_case ) , len(__snake_case ) )
                for jitted_output, output in zip(__snake_case , __snake_case ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def snake_case ( self : int ):
        lowerCamelCase , lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase :Tuple = model_class(__snake_case )
                lowerCamelCase :Dict = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )

                lowerCamelCase :Tuple = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(__snake_case : Any , __snake_case : Any , __snake_case : Union[str, Any] ):
                    return model.decode(
                        decoder_input_ids=__snake_case ,
                        decoder_attention_mask=__snake_case ,
                        encoder_outputs=__snake_case ,
                    )

                with self.subTest('''JIT Enabled''' ):
                    lowerCamelCase :Tuple = decode_jitted(**__snake_case ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowerCamelCase :Optional[Any] = decode_jitted(**__snake_case ).to_tuple()

                self.assertEqual(len(__snake_case ) , len(__snake_case ) )
                for jitted_output, output in zip(__snake_case , __snake_case ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def snake_case ( self : List[str] ):
        for model_class_name in self.all_model_classes:
            lowerCamelCase :Optional[Any] = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            lowerCamelCase :Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
            lowerCamelCase :Union[str, Any] = model(__snake_case )
            self.assertIsNotNone(__snake_case )
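# Illustration only: the shift_tokens_right contract these tests rely on.
# Decoder inputs are the labels shifted one position right, with the
# decoder start token in front (a numpy sketch of the usual seq2seq shift,
# not the flax helper itself, which also maps -100 labels to pad_token_id):
import numpy as np


def sketch_shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)


labels = np.array([[71, 82, 18, 2]])
print(sketch_shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2))
# -> [[ 2 71 82 18]]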
49
import operator as op


def _lowerCamelCase ( a_ : Tuple):
    lowerCamelCase :int = []
    lowerCamelCase :List[str] = lambda a_ , a_: int(x / y)  # noqa: E731 integer division operation
    lowerCamelCase :Optional[int] = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation

    # print table header
    print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
    print('''-''' * (30 + len(a_)))

    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(a_)  # append x to stack
            # output in tabular format
            print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
        else:
            lowerCamelCase :Optional[Any] = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')

            lowerCamelCase :str = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')

            stack.append(
                str(opr[x](int(a_) , int(a_))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8) ,
                ('''push(''' + a + x + b + ''')''').ljust(12) ,
                ''','''.join(a_) ,
                sep=''' | ''' ,
            )

    return int(stack[0])


if __name__ == "__main__":
    A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
    print("""\n\tResult = """, solve(Postfix))
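# Worked example for the evaluator above, minus the step-by-step table
# printing — the same stack algorithm as a compact reference:
def sketch_eval_postfix(tokens):
    import operator as op

    ops = {"^": op.pow, "*": op.mul, "/": lambda x, y: int(x / y), "+": op.add, "-": op.sub}
    stack = []
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))
        else:
            b, a = stack.pop(), stack.pop()  # second operand is popped first
            stack.append(ops[tok](a, b))
    return stack[0]


assert sketch_eval_postfix("3 4 + 2 *".split()) == 14  # (3 + 4) * 2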
49
1
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class _lowerCAmelCase ( ExplicitEnum ):
    _UpperCAmelCase = 'char'
    _UpperCAmelCase = 'bpe'
    _UpperCAmelCase = 'wp'


A__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class _lowerCAmelCase ( ProcessorMixin ):
    _UpperCAmelCase = ['image_processor', 'char_tokenizer']
    _UpperCAmelCase = 'ViTImageProcessor'
    _UpperCAmelCase = 'MgpstrTokenizer'

    def __init__( self : List[str] , __snake_case : Dict=None , __snake_case : Tuple=None , **__snake_case : Tuple ):
        lowerCamelCase :Optional[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' ,
                __snake_case ,
            )
            lowerCamelCase :List[Any] = kwargs.pop('''feature_extractor''' )

        lowerCamelCase :List[str] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        lowerCamelCase :Dict = tokenizer
        lowerCamelCase :Any = AutoTokenizer.from_pretrained('''gpt2''' )
        lowerCamelCase :Optional[int] = AutoTokenizer.from_pretrained('''bert-base-uncased''' )

        super().__init__(__snake_case , __snake_case )

    def __call__( self : Optional[Any] , __snake_case : Tuple=None , __snake_case : Union[str, Any]=None , __snake_case : Dict=None , **__snake_case : Tuple ):
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )

        if images is not None:
            lowerCamelCase :List[str] = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
        if text is not None:
            lowerCamelCase :Any = self.char_tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            lowerCamelCase :Tuple = encodings['''input_ids''']
            return inputs

    def snake_case ( self : Optional[int] , __snake_case : Tuple ):
        lowerCamelCase , lowerCamelCase , lowerCamelCase :Any = sequences
        lowerCamelCase :Union[str, Any] = char_preds.size(0 )
        lowerCamelCase , lowerCamelCase :Tuple = self._decode_helper(__snake_case , '''char''' )
        lowerCamelCase , lowerCamelCase :Tuple = self._decode_helper(__snake_case , '''bpe''' )
        lowerCamelCase , lowerCamelCase :Optional[Any] = self._decode_helper(__snake_case , '''wp''' )

        lowerCamelCase :Optional[Any] = []
        lowerCamelCase :Dict = []
        for i in range(__snake_case ):
            lowerCamelCase :Optional[int] = [char_scores[i], bpe_scores[i], wp_scores[i]]
            lowerCamelCase :Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
            lowerCamelCase :Dict = scores.index(max(__snake_case ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )

        lowerCamelCase :Tuple = {}
        lowerCamelCase :int = final_strs
        lowerCamelCase :str = final_scores
        lowerCamelCase :Dict = char_strs
        lowerCamelCase :Optional[int] = bpe_strs
        lowerCamelCase :str = wp_strs
        return out

    def snake_case ( self : List[str] , __snake_case : Tuple , __snake_case : List[Any] ):
        if format == DecodeType.CHARACTER:
            lowerCamelCase :List[Any] = self.char_decode
            lowerCamelCase :List[Any] = 1
            lowerCamelCase :Tuple = '''[s]'''
        elif format == DecodeType.BPE:
            lowerCamelCase :Tuple = self.bpe_decode
            lowerCamelCase :Dict = 2
            lowerCamelCase :str = '''#'''
        elif format == DecodeType.WORDPIECE:
            lowerCamelCase :Dict = self.wp_decode
            lowerCamelCase :List[Any] = 102
            lowerCamelCase :List[Any] = '''[SEP]'''
        else:
            raise ValueError(F"Format {format} is not supported." )

        lowerCamelCase , lowerCamelCase :Any = [], []
        lowerCamelCase :Dict = pred_logits.size(0 )
        lowerCamelCase :Any = pred_logits.size(1 )
        lowerCamelCase , lowerCamelCase :Dict = pred_logits.topk(1 , dim=-1 , largest=__snake_case , sorted=__snake_case )
        lowerCamelCase :Optional[Any] = preds_index.view(-1 , __snake_case )[:, 1:]
        lowerCamelCase :int = decoder(__snake_case )
        lowerCamelCase , lowerCamelCase :Optional[int] = torch.nn.functional.softmax(__snake_case , dim=2 ).max(dim=2 )
        lowerCamelCase :Optional[Any] = preds_max_prob[:, 1:]

        for index in range(__snake_case ):
            lowerCamelCase :str = preds_str[index].find(__snake_case )
            lowerCamelCase :List[Any] = preds_str[index][:pred_eos]
            lowerCamelCase :Union[str, Any] = preds_index[index].cpu().tolist()
            lowerCamelCase :Dict = pred_index.index(__snake_case ) if eos_token in pred_index else -1
            lowerCamelCase :Optional[Any] = preds_max_prob[index][: pred_eos_index + 1]
            lowerCamelCase :Any = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(__snake_case )
            conf_scores.append(__snake_case )

        return dec_strs, conf_scores

    def snake_case ( self : Union[str, Any] , __snake_case : Dict ):
        lowerCamelCase :int = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__snake_case )]
        return decode_strs

    def snake_case ( self : List[Any] , __snake_case : Tuple ):
        return self.bpe_tokenizer.batch_decode(__snake_case )

    def snake_case ( self : Optional[Any] , __snake_case : str ):
        lowerCamelCase :Dict = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__snake_case )]
        return decode_strs
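# Illustration only: the decision rule implemented by the decode helpers
# above — decode with the char, bpe and wordpiece heads, then keep, per
# sample, the candidate whose head reports the highest confidence:
def sketch_pick_best(candidates):
    """candidates: (decoded_string, confidence) pairs from the three heads."""
    return max(candidates, key=lambda pair: pair[1])


print(sketch_pick_best([("ticket", 0.91), ("ticke#", 0.40), ("ticket", 0.88)]))
# -> ('ticket', 0.91)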
49
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
    raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
    raise Exception("""requires fairseq < v2""")

logging.set_verbosity_info()
A__ = logging.get_logger(__name__)

A__ = """Hello, World!"""
A__ = """en_XX"""


def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
    lowerCamelCase :int = Path('''data_bin''')
    lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(a_).parent) ,
        checkpoint_file=Path(a_).name ,
        _name='''xmod_base''' ,
        arch='''xmod_base''' ,
        task='''multilingual_masked_lm''' ,
        data_name_or_path=str(a_) ,
        bpe='''sentencepiece''' ,
        sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') ,
        src_dict=str(data_dir / '''dict.txt''') ,
    )
    xmod.eval()  # disable dropout
    print(a_)
    lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
    lowerCamelCase :List[str] = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings ,
        hidden_size=xmod.cfg.model.encoder_embed_dim ,
        num_hidden_layers=xmod.cfg.model.encoder_layers ,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads ,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim ,
        max_position_embeddings=5_14 ,
        type_vocab_size=1 ,
        layer_norm_eps=1e-5 ,
        pre_norm=xmod.cfg.model.encoder_normalize_before ,
        adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) ,
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm ,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm ,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter ,
        languages=xmod.cfg.model.languages ,
    )
    if classification_head:
        lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]

    print('''Our X-MOD config:''' , a_)

    lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
    lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
    lowerCamelCase :List[str] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c xmod doesn't use them.
    lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
    lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
        lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]

        # self attention
        lowerCamelCase :Optional[int] = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''')

        lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
        lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
        lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
        lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
        lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
        lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        lowerCamelCase :Optional[int] = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''')
        lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
        lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
        lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
        lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        lowerCamelCase :Optional[int] = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''')
        lowerCamelCase :int = xmod_layer.fca.weight
        lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias

        # output
        lowerCamelCase :List[str] = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''')
        lowerCamelCase :str = xmod_layer.fca.weight
        lowerCamelCase :int = xmod_layer.fca.bias
        lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
        lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
            lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError('''Lists of language adapters do not match.''')

        for lang_code, adapter in xmod_layer.adapter_modules.items():
            lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
            lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
            lowerCamelCase :List[Any] = from_adapter.fca.weight
            lowerCamelCase :List[Any] = from_adapter.fca.bias
            lowerCamelCase :Dict = from_adapter.fca.weight
            lowerCamelCase :Optional[Any] = from_adapter.fca.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
        lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
        lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
        lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
        lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
        lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
        lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
        lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
        lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
        lowerCamelCase :Any = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    lowerCamelCase :str = xmod.encode(a_).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(a_)

    lowerCamelCase :Any = model(a_)[0]
    if classification_head:
        lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
    else:
        lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape , their_output.shape)
    lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
    print(F"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
    if not success:
        raise Exception('''Something went wRoNg''')

    Path(a_).mkdir(parents=a_ , exist_ok=a_)
    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(a_)


if __name__ == "__main__":
    A__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
    )
    A__ = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
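# Example invocation of the conversion script above (the flags come from
# its argparse definition; the paths are placeholders):
#
#   python convert_xmod_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./xmod-base-converted \
#       --classification_head
#
# Illustration only: the final verification pattern the script applies
# before saving (the tensors here are placeholders):
import torch

our_output = torch.tensor([[0.1, 0.2]])
their_output = torch.tensor([[0.1, 0.2]])
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
assert torch.allclose(our_output, their_output, atol=1e-3), "Something went wRoNg"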
49
1
import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : int=None , __snake_case : Dict=True , __snake_case : Tuple=None , **__snake_case : str ): lowerCamelCase :Any = parent lowerCamelCase :List[str] = config_class lowerCamelCase :Optional[int] = has_text_modality lowerCamelCase :List[str] = kwargs lowerCamelCase :Tuple = common_properties def snake_case ( self : Dict ): lowerCamelCase :Optional[Any] = self.config_class(**self.inputs_dict ) lowerCamelCase :List[Any] = ( ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers'''] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['''vocab_size'''] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(__snake_case , __snake_case ) , msg=F"`{prop}` does not exist" ) # Test that config has the common properties as setter for idx, name in enumerate(__snake_case ): try: setattr(__snake_case , __snake_case , __snake_case ) self.parent.assertEqual( getattr(__snake_case , __snake_case ) , __snake_case , msg=F"`{name} value {idx} expected, but was {getattr(__snake_case , __snake_case )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(__snake_case ): try: lowerCamelCase :List[str] = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(__snake_case , __snake_case ) , __snake_case , msg=F"`{name} value {idx} expected, but was {getattr(__snake_case , __snake_case )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def snake_case ( self : Optional[Any] ): lowerCamelCase :Tuple = self.config_class(**self.inputs_dict ) lowerCamelCase :List[str] = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , __snake_case ) def snake_case ( self : Union[str, Any] ): lowerCamelCase :List[Any] = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase :Tuple = os.path.join(__snake_case , '''config.json''' ) config_first.to_json_file(__snake_case ) lowerCamelCase :int = self.config_class.from_json_file(__snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self : List[str] ): lowerCamelCase :Dict = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(__snake_case ) lowerCamelCase :Union[str, Any] = self.config_class.from_pretrained(__snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self : Dict ): lowerCamelCase :List[str] = self.config_class(**self.inputs_dict ) lowerCamelCase :Dict = '''test''' with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase :List[Any] = os.path.join(__snake_case , __snake_case ) config_first.save_pretrained(__snake_case ) lowerCamelCase :Union[str, Any] = self.config_class.from_pretrained(__snake_case , subfolder=__snake_case ) 
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case ( self : Union[str, Any] ): lowerCamelCase :Optional[int] = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) lowerCamelCase :Dict = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def snake_case ( self : Optional[Any] ): if self.config_class.is_composition: return lowerCamelCase :Any = self.config_class() self.parent.assertIsNotNone(__snake_case ) def snake_case ( self : str ): lowerCamelCase :Dict = copy.deepcopy(__snake_case ) lowerCamelCase :Optional[Any] = self.config_class(**__snake_case ) lowerCamelCase :Any = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) ) elif getattr(__snake_case , __snake_case ) != value: wrong_values.append((key, getattr(__snake_case , __snake_case ), value) ) if len(__snake_case ) > 0: lowerCamelCase :Tuple = '''\n'''.join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] ) raise ValueError(F"The following keys were not properly set in the config:\n{errors}" ) def snake_case ( self : Optional[int] ): self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
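# A minimal standalone sketch of the save/reload round trip the tests above exercise.
# `BertConfig` is chosen purely for illustration; any `PretrainedConfig` subclass exposes the
# same `save_pretrained`/`from_pretrained`/`to_dict` API.
import tempfile
from transformers import BertConfig

demo_config = BertConfig(hidden_size=128)
with tempfile.TemporaryDirectory() as demo_dir:
    demo_config.save_pretrained(demo_dir)  # writes config.json into the directory
    reloaded_config = BertConfig.from_pretrained(demo_dir)
assert reloaded_config.to_dict() == demo_config.to_dict()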
49
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A__ = logging.get_logger(__name__) A__ = { """andreasmadsen/efficient_mlm_m0.40""": ( """https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json""" ), } class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'roberta-prelayernorm' def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ): super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case ) lowerCamelCase :Optional[int] = vocab_size lowerCamelCase :Dict = hidden_size lowerCamelCase :Tuple = num_hidden_layers lowerCamelCase :Optional[int] = num_attention_heads lowerCamelCase :Any = hidden_act lowerCamelCase :List[Any] = intermediate_size lowerCamelCase :Union[str, Any] = hidden_dropout_prob lowerCamelCase :str = attention_probs_dropout_prob lowerCamelCase :Tuple = max_position_embeddings lowerCamelCase :int = type_vocab_size lowerCamelCase :Optional[Any] = initializer_range lowerCamelCase :Union[str, Any] = layer_norm_eps lowerCamelCase :Dict = position_embedding_type lowerCamelCase :List[Any] = use_cache lowerCamelCase :Optional[int] = classifier_dropout class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): @property def snake_case ( self : Any ): if self.task == "multiple-choice": lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
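# A hedged usage sketch, assuming the upstream class name `RobertaPreLayerNormConfig` for the
# config defined above: the defaults mirror RoBERTa, and `model_type` is pinned by the class.
from transformers import RobertaPreLayerNormConfig

demo_cfg = RobertaPreLayerNormConfig()
assert demo_cfg.model_type == "roberta-prelayernorm"
assert demo_cfg.hidden_size == 768 and demo_cfg.num_hidden_layers == 12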
49
1
from __future__ import annotations A__ = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class _lowerCAmelCase : def __init__( self : Optional[Any] , __snake_case : dict[str, list[str]] , __snake_case : str ): lowerCamelCase :str = graph # mapping node to its parent in resulting breadth first tree lowerCamelCase :dict[str, str | None] = {} lowerCamelCase :List[Any] = source_vertex def snake_case ( self : int ): lowerCamelCase :Dict = {self.source_vertex} lowerCamelCase :Dict = None lowerCamelCase :Tuple = [self.source_vertex] # first in first out queue while queue: lowerCamelCase :List[str] = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__snake_case ) lowerCamelCase :int = vertex queue.append(__snake_case ) def snake_case ( self : List[Any] , __snake_case : str ): if target_vertex == self.source_vertex: return self.source_vertex lowerCamelCase :str = self.parent.get(__snake_case ) if target_vertex_parent is None: lowerCamelCase :List[Any] = ( F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) raise ValueError(__snake_case ) return self.shortest_path(__snake_case ) + F"->{target_vertex}" if __name__ == "__main__": A__ = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
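# A standalone sketch (an assumed helper, not part of the class above) of the same parent-map
# idea done iteratively: walk parents from the target back to the source, then reverse.
def reconstruct_path(parent: dict, source: str, target: str) -> list:
    path = [target]
    while path[-1] != source:
        previous = parent.get(path[-1])
        if previous is None:
            raise ValueError(f"No path from vertex: {source} to vertex: {target}")
        path.append(previous)
    return path[::-1]

assert reconstruct_path({"B": "A", "D": "B"}, "A", "D") == ["A", "B", "D"]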
49
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = DebertaTokenizer _UpperCAmelCase = True _UpperCAmelCase = DebertaTokenizerFast def snake_case ( self : List[str] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase :Dict = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''} lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__snake_case ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__snake_case ) ) def snake_case ( self : str , **__snake_case : Dict ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Optional[Any] , __snake_case : int ): lowerCamelCase :List[Any] = '''lower newer''' lowerCamelCase :List[str] = '''lower newer''' return input_text, output_text def snake_case ( self : str ): lowerCamelCase :Optional[int] = self.get_tokenizer() lowerCamelCase :Union[str, Any] = '''lower newer''' lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) lowerCamelCase :List[str] = tokens + [tokenizer.unk_token] lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case ) def snake_case ( self : Optional[int] ): lowerCamelCase :List[str] = self.get_tokenizer() lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' ) lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __snake_case ) @slow def snake_case ( self : str ): lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case ) lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case ) lowerCamelCase :Union[str, Any] = tokenizer.encode( '''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case ) lowerCamelCase :str = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case ) lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case ) lowerCamelCase :Dict = 
tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def snake_case ( self : str ): lowerCamelCase :List[str] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase :Tuple = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case ) lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']] # fmt: off lowerCamelCase :Any = { '''input_ids''': [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on lowerCamelCase :Optional[int] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __snake_case ) for expected, decoded in zip(__snake_case , __snake_case ): self.assertEqual(__snake_case , __snake_case )
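# Tracing the toy byte-level BPE above by hand: inside "lower" only the "e r" merge applies,
# and the space marker "\u0120" before "n" stays unmerged because "\u0120 n" is not a listed
# merge; that is exactly why the greedy-tokenization test expects these pieces.
expected_pieces = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
assert "".join(expected_pieces) == "lower\u0120newer"  # pieces re-concatenate to the marked text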
49
1
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _lowerCamelCase ( a_ : Optional[Any] , a_ : str , a_ : Optional[Any]): if isinstance(a_ , torch.Tensor): return image elif isinstance(a_ , PIL.Image.Image): lowerCamelCase :List[Any] = [image] if isinstance(image[0] , PIL.Image.Image): lowerCamelCase :Optional[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image] lowerCamelCase :str = np.concatenate(a_ , axis=0) lowerCamelCase :Tuple = np.array(a_).astype(np.floataa) / 255.0 lowerCamelCase :int = image.transpose(0 , 3 , 1 , 2) lowerCamelCase :str = 2.0 * image - 1.0 lowerCamelCase :Dict = torch.from_numpy(a_) elif isinstance(image[0] , torch.Tensor): lowerCamelCase :Optional[Any] = torch.cat(a_ , dim=0) return image def _lowerCamelCase ( a_ : int , a_ : Dict , a_ : List[str] , a_ : Tuple=0.9_995): if not isinstance(a_ , np.ndarray): lowerCamelCase :List[str] = True lowerCamelCase :int = va.device lowerCamelCase :Union[str, Any] = va.cpu().numpy() lowerCamelCase :List[str] = va.cpu().numpy() lowerCamelCase :Optional[Any] = np.sum(va * va / (np.linalg.norm(a_) * np.linalg.norm(a_))) if np.abs(a_) > DOT_THRESHOLD: lowerCamelCase :int = (1 - t) * va + t * va else: lowerCamelCase :Optional[int] = np.arccos(a_) lowerCamelCase :List[Any] = np.sin(a_) lowerCamelCase :int = theta_a * t lowerCamelCase :Union[str, Any] = np.sin(a_) lowerCamelCase :str = np.sin(theta_a - theta_t) / sin_theta_a lowerCamelCase :Any = sin_theta_t / sin_theta_a lowerCamelCase :Union[str, Any] = sa * va + sa * va if inputs_are_torch: lowerCamelCase :Tuple = torch.from_numpy(a_).to(a_) return va def _lowerCamelCase ( a_ : Optional[int] , a_ : Dict): lowerCamelCase :Dict = F.normalize(a_ , dim=-1) lowerCamelCase :int = F.normalize(a_ , dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def _lowerCamelCase ( a_ : Any , a_ : List[Any]): for param in model.parameters(): lowerCamelCase :List[str] = value class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : Union[str, Any] , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : str=None , __snake_case : Tuple=None , __snake_case : Optional[int]=None , ): super().__init__() self.register_modules( vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , ) lowerCamelCase :Tuple = ( feature_extractor.size if isinstance(feature_extractor.size , __snake_case ) else feature_extractor.size['''shortest_edge'''] ) lowerCamelCase :List[Any] = transforms.Normalize(mean=feature_extractor.image_mean , 
std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , __snake_case ) set_requires_grad(self.clip_model , __snake_case ) def snake_case ( self : Union[str, Any] , __snake_case : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCamelCase :Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__snake_case ) def snake_case ( self : Optional[int] ): self.enable_attention_slicing(__snake_case ) def snake_case ( self : List[Any] ): set_requires_grad(self.vae , __snake_case ) def snake_case ( self : Tuple ): set_requires_grad(self.vae , __snake_case ) def snake_case ( self : Any ): set_requires_grad(self.unet , __snake_case ) def snake_case ( self : Union[str, Any] ): set_requires_grad(self.unet , __snake_case ) def snake_case ( self : Any , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Tuple ): # get the original timestep using init_timestep lowerCamelCase :Optional[int] = min(int(num_inference_steps * strength ) , __snake_case ) lowerCamelCase :Optional[int] = max(num_inference_steps - init_timestep , 0 ) lowerCamelCase :List[str] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def snake_case ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Dict , __snake_case : Dict , __snake_case : Dict , __snake_case : List[str]=None ): if not isinstance(__snake_case , torch.Tensor ): raise ValueError(F"`image` has to be of type `torch.Tensor` but is {type(__snake_case )}" ) lowerCamelCase :Dict = image.to(device=__snake_case , dtype=__snake_case ) if isinstance(__snake_case , __snake_case ): lowerCamelCase :Any = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case ) ] lowerCamelCase :Union[str, Any] = torch.cat(__snake_case , dim=0 ) else: lowerCamelCase :Tuple = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowerCamelCase :Dict = 0.1_8_2_1_5 * init_latents lowerCamelCase :Union[str, Any] = init_latents.repeat_interleave(__snake_case , dim=0 ) lowerCamelCase :Optional[int] = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case ) # get latents lowerCamelCase :int = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case ) lowerCamelCase :Optional[Any] = init_latents return latents def snake_case ( self : str , __snake_case : Tuple ): lowerCamelCase :Any = self.coca_transform(__snake_case ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): lowerCamelCase :str = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) lowerCamelCase :Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' ) def snake_case ( self : Tuple , __snake_case : List[str] , __snake_case : List[str] ): lowerCamelCase :List[Any] = self.feature_extractor.preprocess(__snake_case ) lowerCamelCase :List[Any] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half() lowerCamelCase :Optional[int] = self.clip_model.get_image_features(__snake_case ) lowerCamelCase :int = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) 
lowerCamelCase :Dict = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 ) return image_embeddings_clip @torch.enable_grad() def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[Any] , ): lowerCamelCase :List[Any] = latents.detach().requires_grad_() lowerCamelCase :Union[str, Any] = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual lowerCamelCase :Dict = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): lowerCamelCase :List[str] = self.scheduler.alphas_cumprod[timestep] lowerCamelCase :Any = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowerCamelCase :Optional[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 lowerCamelCase :Any = torch.sqrt(__snake_case ) lowerCamelCase :Dict = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , __snake_case ): lowerCamelCase :Optional[Any] = self.scheduler.sigmas[index] lowerCamelCase :Tuple = latents - sigma * noise_pred else: raise ValueError(F"scheduler type {type(self.scheduler )} not supported" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowerCamelCase :int = 1 / 0.1_8_2_1_5 * sample lowerCamelCase :int = self.vae.decode(__snake_case ).sample lowerCamelCase :Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase :int = transforms.Resize(self.feature_extractor_size )(__snake_case ) lowerCamelCase :str = self.normalize(__snake_case ).to(latents.dtype ) lowerCamelCase :List[Any] = self.clip_model.get_image_features(__snake_case ) lowerCamelCase :Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) lowerCamelCase :str = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale lowerCamelCase :Tuple = -torch.autograd.grad(__snake_case , __snake_case )[0] if isinstance(self.scheduler , __snake_case ): lowerCamelCase :Union[str, Any] = latents.detach() + grads * (sigma**2) lowerCamelCase :Dict = noise_pred_original else: lowerCamelCase :Dict = noise_pred_original - torch.sqrt(__snake_case ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Tuple , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 512 , __snake_case : Optional[int] = 512 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 100 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ): if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size: raise ValueError(F"You have passed {batch_size} batch_size, but only {len(__snake_case )} generators." 
) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if isinstance(__snake_case , torch.Generator ) and batch_size > 1: lowerCamelCase :Optional[int] = [generator] + [None] * (batch_size - 1) lowerCamelCase :Optional[Any] = [ ('''model''', self.coca_model is None), ('''tokenizer''', self.coca_tokenizer is None), ('''transform''', self.coca_transform is None), ] lowerCamelCase :str = [x[0] for x in coca_is_none if x[1]] lowerCamelCase :List[Any] = ''', '''.join(__snake_case ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(__snake_case ): raise ValueError( F"Content prompt is None and CoCa [{coca_is_none_str}] is None." F"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." ) lowerCamelCase :Optional[Any] = self.get_image_description(__snake_case ) if style_prompt is None: if len(__snake_case ): raise ValueError( F"Style prompt is None and CoCa [{coca_is_none_str}] is None." F" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." ) lowerCamelCase :Optional[Any] = self.get_image_description(__snake_case ) # get prompt text embeddings for content and style lowerCamelCase :Optional[Any] = self.tokenizer( __snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='''pt''' , ) lowerCamelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] lowerCamelCase :Union[str, Any] = self.tokenizer( __snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='''pt''' , ) lowerCamelCase :List[Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] lowerCamelCase :Tuple = slerp(__snake_case , __snake_case , __snake_case ) # duplicate text embeddings for each generation per prompt lowerCamelCase :List[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 ) # set timesteps lowerCamelCase :Any = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) lowerCamelCase :int = {} if accepts_offset: lowerCamelCase :Dict = 1 self.scheduler.set_timesteps(__snake_case , **__snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) lowerCamelCase , lowerCamelCase :Dict = self.get_timesteps(__snake_case , __snake_case , self.device ) lowerCamelCase :Optional[Any] = timesteps[:1].repeat(__snake_case ) # Preprocess image lowerCamelCase :List[Any] = preprocess(__snake_case , __snake_case , __snake_case ) lowerCamelCase :Union[str, Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) lowerCamelCase :str = preprocess(__snake_case , __snake_case , __snake_case ) lowerCamelCase :Dict = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) lowerCamelCase :Tuple = slerp(__snake_case , __snake_case , __snake_case ) if clip_guidance_scale > 0: lowerCamelCase :Optional[int] = self.get_clip_image_embeddings(__snake_case , __snake_case ) lowerCamelCase :Dict = self.get_clip_image_embeddings(__snake_case , __snake_case ) lowerCamelCase :List[str] = slerp( __snake_case , __snake_case , __snake_case ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: 
https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowerCamelCase :Tuple = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowerCamelCase :List[Any] = content_text_input.input_ids.shape[-1] lowerCamelCase :Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=__snake_case , return_tensors='''pt''' ) lowerCamelCase :Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt lowerCamelCase :Any = uncond_embeddings.repeat_interleave(__snake_case , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCamelCase :Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. lowerCamelCase :Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8) lowerCamelCase :Union[str, Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps lowerCamelCase :Any = torch.randn(__snake_case , generator=__snake_case , device='''cpu''' , dtype=__snake_case ).to( self.device ) else: lowerCamelCase :Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case ) else: if latents.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) lowerCamelCase :List[str] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowerCamelCase :List[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCamelCase :Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCamelCase :Tuple = {} if accepts_eta: lowerCamelCase :List[Any] = eta # check if the scheduler accepts generator lowerCamelCase :Optional[int] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: lowerCamelCase :Union[str, Any] = generator with self.progress_bar(total=__snake_case ): for i, t in enumerate(__snake_case ): # expand the latents if we are doing classifier free guidance lowerCamelCase :Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCamelCase :List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual lowerCamelCase :List[str] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample # perform classifier free guidance if do_classifier_free_guidance: lowerCamelCase , lowerCamelCase :Optional[int] = noise_pred.chunk(2 ) lowerCamelCase :Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: lowerCamelCase :List[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) lowerCamelCase , lowerCamelCase :Optional[Any] = self.cond_fn( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase :int = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowerCamelCase :List[str] = 1 / 0.1_8_2_1_5 * latents lowerCamelCase :Optional[int] = self.vae.decode(__snake_case ).sample lowerCamelCase :Dict = (image / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase :Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCamelCase :Any = self.numpy_to_pil(__snake_case ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
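# A numpy-only sketch of the spherical interpolation (slerp) the pipeline uses above to blend
# the content and style embeddings; the names here are illustrative, not the pipeline's own.
import numpy as np

def slerp_demo(t: float, va: np.ndarray, vb: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    dot = np.sum(va * vb) / (np.linalg.norm(va) * np.linalg.norm(vb))
    if np.abs(dot) > dot_threshold:
        return (1 - t) * va + t * vb  # nearly parallel: fall back to linear interpolation
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)

# t=0 returns va, t=1 returns vb; intermediate t sweeps along the great circle between them.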
49
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) A__ = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ): lowerCamelCase :Tuple = None lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) lowerCamelCase :Optional[int] = os.path.abspath('''examples''' ) for item in os.listdir(__snake_case ): if item not in EXCLUDE_EXAMPLES: lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ) and ".py" in item_path: with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ): lowerCamelCase :Union[str, Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case ) lowerCamelCase :int = '''\n'''.join(__snake_case ) if special_strings is not None: for string in special_strings: lowerCamelCase :int = diff.replace(__snake_case , '''''' ) self.assertEqual(__snake_case , '''''' ) def snake_case ( self : Dict ): self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) lowerCamelCase :Optional[int] = [ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = False @classmethod def snake_case ( cls : Optional[Any] ): super().setUpClass() lowerCamelCase :Any = tempfile.mkdtemp() lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case ( cls : Dict ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case ( self : int ): lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n 
--checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def snake_case ( self : List[Any] ): lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() lowerCamelCase :List[Any] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def snake_case ( self : List[str] ): lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split() lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) def snake_case ( self : str ): lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split() lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case ) if torch.cuda.is_available(): lowerCamelCase :Union[str, Any] = torch.cuda.device_count() else: lowerCamelCase :Dict = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) else: self.assertIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) @slow def snake_case ( self : Any ): lowerCamelCase :Tuple = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case ) lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1] lowerCamelCase :List[str] = ast.literal_eval(__snake_case ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def snake_case ( self : int ): lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Any ): with tempfile.TemporaryDirectory() as tmpdir: lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) ) def snake_case ( self : Tuple ): lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def snake_case ( self : Optional[Any] ): lowerCamelCase :int = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
49
1
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) A__ = logging.getLogger(__name__) A__ = """Hello world! cécé herlolip""" A__ = namedtuple( """BertAbsConfig""", [ """temp_dir""", """large""", """use_bert_emb""", """finetune_bert""", """encoder""", """share_emb""", """max_pos""", """enc_layers""", """enc_hidden_size""", """enc_heads""", """enc_ff_size""", """enc_dropout""", """dec_layers""", """dec_hidden_size""", """dec_heads""", """dec_ff_size""", """dec_dropout""", ], ) def _lowerCamelCase ( a_ : str , a_ : str): lowerCamelCase :List[Any] = BertAbsConfig( temp_dir='''.''' , finetune_bert=a_ , large=a_ , share_emb=a_ , use_bert_emb=a_ , encoder='''bert''' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) lowerCamelCase :Union[str, Any] = torch.load(a_ , lambda a_ , a_: storage) lowerCamelCase :Optional[int] = AbsSummarizer(a_ , torch.device('''cpu''') , a_) original.eval() lowerCamelCase :Dict = BertAbsSummarizer(a_ , torch.device('''cpu''')) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''') new_model.bert.load_state_dict(original.bert.state_dict()) new_model.decoder.load_state_dict(original.decoder.state_dict()) new_model.generator.load_state_dict(original.generator.state_dict()) # ---------------------------------- # Make sure the outputs are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''') lowerCamelCase :Any = BertTokenizer.from_pretrained('''bert-base-uncased''') # prepare the model inputs lowerCamelCase :Union[str, Any] = tokenizer.encode('''This is sample éàalj\'-.''') encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(a_))) lowerCamelCase :List[Any] = torch.tensor(a_).unsqueeze(0) lowerCamelCase :Dict = tokenizer.encode('''This is sample 3 éàalj\'-.''') decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(a_))) lowerCamelCase :Any = torch.tensor(a_).unsqueeze(0) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0 # forward pass lowerCamelCase :List[Any] = encoder_input_ids lowerCamelCase :Dict = decoder_input_ids lowerCamelCase :List[Any] = None lowerCamelCase :int = None lowerCamelCase :List[str] = None lowerCamelCase :Optional[Any] = None lowerCamelCase :Dict = None # The original model does not apply the generator layer immediately but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical lowerCamelCase :List[Any] = original(a_ , a_ , a_ , a_ , a_ , a_ , a_)[0] lowerCamelCase :List[str] = original.generator(a_) lowerCamelCase :Union[str, Any] = new_model( a_ , a_ , a_ , a_ , a_)[0] lowerCamelCase :Union[str, Any] = new_model.generator(a_) lowerCamelCase :Dict = torch.max(torch.abs(output_converted_model - output_original_model)).item() print('''Maximum absolute difference between outputs: {:.2f}'''.format(a_)) lowerCamelCase :str = torch.max(torch.abs(output_converted_generator - output_original_generator)).item() print('''Maximum absolute difference between outputs: {:.2f}'''.format(a_)) lowerCamelCase :Tuple = torch.allclose(a_ , a_ , atol=1e-3) if are_identical: logging.info('''all outputs are equal up to 1e-3''') else: raise ValueError('''the outputs are different. The new model is likely different from the original one.''') # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''') torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''') if __name__ == "__main__": A__ = argparse.ArgumentParser() parser.add_argument( """--bertabs_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official PyTorch dump.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""", ) A__ = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
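# The equivalence check above, distilled into a reusable sketch with toy tensors (the real
# models are assumed): compare outputs elementwise and accept differences below a tolerance.
import torch

def outputs_match(reference: torch.Tensor, converted: torch.Tensor, atol: float = 1e-3) -> bool:
    max_diff = torch.max(torch.abs(reference - converted)).item()
    print("Maximum absolute difference between outputs: {:.6f}".format(max_diff))
    return torch.allclose(reference, converted, atol=atol)

assert outputs_match(torch.zeros(2, 3), torch.full((2, 3), 1e-4))  # 1e-4 <= 1e-3, so True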
49
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""") A__ = cvtColor(img, COLOR_BGR2GRAY) def _lowerCamelCase ( ): lowerCamelCase :int = cn.convert_to_negative(a_) # assert negative_img array for at least one True assert negative_img.any() def _lowerCamelCase ( ): with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img: # Work around assertion for response assert str(cc.change_contrast(a_ , 1_10)).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''') def _lowerCamelCase ( ): lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4) # Assert ambiguous array assert resp.all() def _lowerCamelCase ( ): lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0) # assert ambiguous array for all == True assert canny_img.all() lowerCamelCase :Optional[Any] = canny.canny(a_) # assert canny array for at least one True assert canny_array.any() def _lowerCamelCase ( ): assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all() def _lowerCamelCase ( ): # laplace diagonals lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]]) lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_) assert res.any() def _lowerCamelCase ( ): assert med.median_filter(a_ , 3).any() def _lowerCamelCase ( ): lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_) assert grad.any() and theta.any() def _lowerCamelCase ( ): lowerCamelCase :Dict = sp.make_sepia(a_ , 20) assert sepia.all() def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"): lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20) burkes.process() assert burkes.output_img.any() def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ): lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00) nn.process() assert nn.output.any() def _lowerCamelCase ( ): lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. lowerCamelCase :Tuple = imread(a_ , 0) # Test for get_neighbors_pixel function() return not None lowerCamelCase :Dict = 0 lowerCamelCase :Optional[Any] = 0 lowerCamelCase :str = image[x_coordinate][y_coordinate] lowerCamelCase :Any = lbp.get_neighbors_pixel( a_ , a_ , a_ , a_) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1])) # Iterating through the image and calculating the local binary pattern value # for each pixel. 
for i in range(0 , image.shape[0]): for j in range(0 , image.shape[1]): lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_) assert lbp_image.any()
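# A compact illustration (a toy re-implementation, not the library call above) of one local
# binary pattern value: threshold the 8 neighbours against the centre, weight by powers of 2.
import numpy as np

def lbp_value_demo(window: np.ndarray) -> int:
    center = window[1, 1]
    neighbours = [window[0, 0], window[0, 1], window[0, 2], window[1, 2],
                  window[2, 2], window[2, 1], window[2, 0], window[1, 0]]  # clockwise
    return sum(int(n >= center) << i for i, n in enumerate(neighbours))

assert lbp_value_demo(np.array([[9, 9, 9], [1, 5, 1], [1, 1, 1]])) == 7  # top row sets bits 0-2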
49
1
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def _lowerCamelCase ( a_ : List[Any] , a_ : List[str] , a_ : Optional[Any]): lowerCamelCase :List[Any] = 1.5 lowerCamelCase :Any = int(factor * num_class_images) lowerCamelCase :List[str] = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=a_ , aesthetic_weight=0.1) os.makedirs(F"{class_data_dir}/images" , exist_ok=a_) if len(list(Path(F"{class_data_dir}/images").iterdir())) >= num_class_images: return while True: lowerCamelCase :Dict = client.query(text=a_) if len(a_) >= factor * num_class_images or num_images > 1e4: break else: lowerCamelCase :Tuple = int(factor * num_images) lowerCamelCase :Optional[int] = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=a_ , aesthetic_weight=0.1 , ) lowerCamelCase :List[str] = 0 lowerCamelCase :Tuple = 0 lowerCamelCase :Union[str, Any] = tqdm(desc='''downloading real regularization images''' , total=a_) with open(F"{class_data_dir}/caption.txt" , '''w''') as fa, open(F"{class_data_dir}/urls.txt" , '''w''') as fa, open( F"{class_data_dir}/images.txt" , '''w''') as fa: while total < num_class_images: lowerCamelCase :List[Any] = class_images[count] count += 1 try: lowerCamelCase :Dict = requests.get(images['''url''']) if img.status_code == 2_00: lowerCamelCase :Any = Image.open(BytesIO(img.content)) with open(F"{class_data_dir}/images/{total}.jpg" , '''wb''') as f: f.write(img.content) fa.write(images['''caption'''] + '''\n''') fa.write(images['''url'''] + '''\n''') fa.write(F"{class_data_dir}/images/{total}.jpg" + '''\n''') total += 1 pbar.update(1) else: continue except Exception: continue return def _lowerCamelCase ( ): lowerCamelCase :List[Any] = argparse.ArgumentParser('''''' , add_help=a_) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=a_ , type=a_) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=a_ , type=a_) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=a_) return parser.parse_args() if __name__ == "__main__": A__ = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
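# A hedged invocation sketch for the script above; the flag names come from its argparse block,
# while the file name itself is assumed for illustration:
#   python retrieve_images.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./class_dog --num_class_images 200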
49
import os from math import logaa def _lowerCamelCase ( a_ : str = "base_exp.txt"): lowerCamelCase :float = 0 lowerCamelCase :Optional[int] = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))): lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''','''))) if x * logaa(a_) > largest: lowerCamelCase :List[Any] = x * logaa(a_) lowerCamelCase :Any = i + 1 return result if __name__ == "__main__": print(solution())
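# Why the logarithm trick above works: a**b > c**d exactly when b*log10(a) > d*log10(c), so the
# largest base/exponent line can be found without materialising the huge powers. A tiny check:
from math import log10

assert (2**10 > 3**6) == (10 * log10(2) > 6 * log10(3))  # 1024 > 729, and 3.010 > 2.863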
49
1
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Tuple ): lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils ) lowerCamelCase :Dict = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 lowerCamelCase :Any = test_metrics @require_cpu def snake_case ( self : Dict ): debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def snake_case ( self : int ): debug_launcher(self.test_metrics.main ) @require_single_gpu def snake_case ( self : Any ): self.test_metrics.main() @require_multi_gpu def snake_case ( self : Optional[int] ): print(F"Found {torch.cuda.device_count()} devices." ) lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__snake_case , env=os.environ.copy() )
49
def _lowerCamelCase ( a_ : list): if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''') for cell_n in range(1 , len(grid[0])): grid[0][cell_n] += grid[0][cell_n - 1] lowerCamelCase :Any = grid[0] for row_n in range(1 , len(a_)): lowerCamelCase :List[str] = grid[row_n] lowerCamelCase :Union[str, Any] = fill_row(a_ , a_) lowerCamelCase :List[Any] = grid[row_n] return grid[-1][-1] def _lowerCamelCase ( a_ : list , a_ : list): current_row[0] += row_above[0] for cell_n in range(1 , len(a_)): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n]) return current_row if __name__ == "__main__": import doctest doctest.testmod()
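# A self-contained trace (a toy re-implementation, not the functions above) of the same
# row-by-row dynamic programme on a 3x3 grid; only right and down moves are allowed.
def min_path_sum_demo(grid: list) -> int:
    acc = list(grid[0])
    for j in range(1, len(acc)):
        acc[j] += acc[j - 1]  # first row is only reachable from the left
    for row in grid[1:]:
        acc[0] += row[0]  # first column is only reachable from above
        for j in range(1, len(row)):
            acc[j] = row[j] + min(acc[j], acc[j - 1])
    return acc[-1]

assert min_path_sum_demo([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1->3->1->1->1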
49
1
from __future__ import annotations def _lowerCamelCase ( a_ : list , a_ : int): # Checks if the entire collection has been sorted if len(a_) <= 1 or n <= 1: return insert_next(a_ , n - 1) rec_insertion_sort(a_ , n - 1) def _lowerCamelCase ( a_ : list , a_ : int): # Checks order between adjacent elements if index >= len(a_) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order lowerCamelCase , lowerCamelCase :int = ( collection[index], collection[index - 1], ) insert_next(a_ , index + 1) if __name__ == "__main__": A__ = input("""Enter integers separated by spaces: """) A__ = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
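# A self-contained trace (toy names, not the ones above) of the same idea: sort the first
# n-1 items recursively, then bubble the last item back into its place with adjacent swaps.
def rec_sort_demo(xs: list, n: int) -> None:
    if n <= 1:
        return
    rec_sort_demo(xs, n - 1)
    j = n - 1
    while j > 0 and xs[j - 1] > xs[j]:
        xs[j - 1], xs[j] = xs[j], xs[j - 1]
        j -= 1

sample = [5, 2, 4, 1]
rec_sort_demo(sample, len(sample))
assert sample == [1, 2, 4, 5]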
49
import math def _lowerCamelCase ( a_ : int): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(a_) + 1) , 6): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowerCamelCase ( a_ : float = 0.1): lowerCamelCase :Dict = 3 lowerCamelCase :List[Any] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1): primes += is_prime(a_) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
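# Context for the diagonal walk above (Project Euler 58 style): for an odd spiral side length
# s, the four new corner values are s*s - k*(s - 1) for k in 0..3; for s = 5 those are 25, 21,
# 17 and 13, of which only 13 and 17 are prime.
corners = [5 * 5 - k * (5 - 1) for k in range(4)]
assert corners == [25, 21, 17, 13]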
49
1
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=__SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization _UpperCAmelCase = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} ) _UpperCAmelCase = Features({'text': Value('string' )} ) _UpperCAmelCase = Features({'summary': Value('string' )} ) _UpperCAmelCase = "text" _UpperCAmelCase = "summary" @property def snake_case ( self : Optional[int] ): return {self.text_column: "text", self.summary_column: "summary"}
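# A hedged usage sketch, assuming the upstream name `Summarization` for the template above:
# the task template maps arbitrary dataset column names onto the canonical text/summary pair.
from datasets.tasks import Summarization

demo_task = Summarization(text_column="article", summary_column="highlights")
assert demo_task.column_mapping == {"article": "text", "highlights": "summary"}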
49
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : str ): lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :Optional[Any] = -1 lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :str = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase :str = TextStreamer(__snake_case ) model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase :Optional[int] = cs.out[:-1] self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : Dict ): lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :List[Any] = -1 lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] ) lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case ) lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case ) thread.start() lowerCamelCase :Any = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : str ): lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :List[str] = -1 lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :] lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case ) model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase :int = cs.out[:-1] self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : Optional[int] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case ) lowerCamelCase :Optional[int] = -1 lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case ) model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n" lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def snake_case ( self : List[Any] ): lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :Optional[int] = -1 lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 ) lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__snake_case ): lowerCamelCase :Dict = '''''' for new_text in streamer: streamer_text += new_text
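# A minimal usage sketch of the iterator-streamer pattern these tests exercise: generation
# runs on a background thread while the main thread consumes decoded text as it arrives.
# The checkpoint name mirrors the tests; everything else is standard transformers API.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lm = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tok)
Thread(target=lm.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
generated_text = "".join(streamer)  # blocks until generation ends, yielding chunks as decoded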
49
1
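The tests above drive both streaming entry points; a minimal, hedged sketch of the same API used outside a test harness (the tiny test checkpoint is kept purely for illustration):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)

# generate() blocks until finished, so it runs in a worker thread while the
# main thread consumes decoded text chunks as they become available.
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
for chunk in streamer:
    print(chunk, end="")
thread.join()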
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = StableDiffusionXLImgaImgPipeline _UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'latents'} _UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def snake_case ( self : Dict ): torch.manual_seed(0 ) lowerCamelCase :List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__snake_case , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) lowerCamelCase :str = EulerDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) lowerCamelCase :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase :Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , ) lowerCamelCase :List[str] = CLIPTextModel(__snake_case ) lowerCamelCase :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__snake_case ) lowerCamelCase :Union[str, Any] = CLIPTextModelWithProjection(__snake_case ) lowerCamelCase :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__snake_case ) lowerCamelCase :Dict = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def snake_case ( self : Tuple , __snake_case : Optional[int] , __snake_case : Any=0 ): lowerCamelCase :str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case ) lowerCamelCase :Union[str, Any] = image / 2 + 0.5 if str(__snake_case ).startswith('''mps''' ): lowerCamelCase :Optional[Any] = torch.manual_seed(__snake_case ) else: 
lowerCamelCase :Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) lowerCamelCase :Union[str, Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.7_5, } return inputs def snake_case ( self : List[Any] ): lowerCamelCase :List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase :str = self.get_dummy_components() lowerCamelCase :Dict = StableDiffusionXLImgaImgPipeline(**__snake_case ) lowerCamelCase :List[str] = sd_pipe.to(__snake_case ) sd_pipe.set_progress_bar_config(disable=__snake_case ) lowerCamelCase :Optional[Any] = self.get_dummy_inputs(__snake_case ) lowerCamelCase :List[str] = sd_pipe(**__snake_case ).images lowerCamelCase :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase :str = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case ( self : Optional[Any] ): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def snake_case ( self : int ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def snake_case ( self : Tuple ): pass def snake_case ( self : str ): lowerCamelCase :Optional[int] = self.get_dummy_components() lowerCamelCase :int = StableDiffusionXLImgaImgPipeline(**__snake_case ) lowerCamelCase :Any = sd_pipe.to(__snake_case ) lowerCamelCase :str = sd_pipe.to(__snake_case ) sd_pipe.set_progress_bar_config(disable=__snake_case ) # forward without prompt embeds lowerCamelCase :Dict = self.get_dummy_inputs(__snake_case ) lowerCamelCase :Tuple = 3 * ['''this is a negative prompt'''] lowerCamelCase :Tuple = negative_prompt lowerCamelCase :List[Any] = 3 * [inputs['''prompt''']] lowerCamelCase :str = sd_pipe(**__snake_case ) lowerCamelCase :Optional[int] = output.images[0, -3:, -3:, -1] # forward with prompt embeds lowerCamelCase :Any = self.get_dummy_inputs(__snake_case ) lowerCamelCase :Dict = 3 * ['''this is a negative prompt'''] lowerCamelCase :Any = 3 * [inputs.pop('''prompt''' )] ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) :Optional[Any] = sd_pipe.encode_prompt(__snake_case , negative_prompt=__snake_case ) lowerCamelCase :Any = sd_pipe( **__snake_case , prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , pooled_prompt_embeds=__snake_case , negative_pooled_prompt_embeds=__snake_case , ) lowerCamelCase :str = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : str ): super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : Optional[int] , __snake_case : List[str] , __snake_case : Dict="cpu" , __snake_case : Dict=torch.floataa , __snake_case : Optional[Any]=0 ): lowerCamelCase :Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) lowerCamelCase :Dict = np.random.RandomState(__snake_case ).standard_normal((1, 4, 64, 64) ) lowerCamelCase :int = torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case ) lowerCamelCase :Optional[int] = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': 
generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def snake_case ( self : List[str] ): lowerCamelCase :List[Any] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) lowerCamelCase :Dict = self.get_inputs(__snake_case ) lowerCamelCase :Any = pipe(**__snake_case ).images lowerCamelCase :str = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase :Dict = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
49
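The prompt-embedding test above checks that precomputed embeddings reproduce the raw-prompt result. A rough sketch of that flow against a real SDXL img2img checkpoint follows; the checkpoint name and image URL are illustrative assumptions, the download is large, and device/dtype handling is omitted:

from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)

# encode_prompt returns four tensors for SDXL: per-token and pooled
# embeddings for both the prompt and the negative prompt.
(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt("a photo of a cat", negative_prompt="blurry")

image = pipe(
    image=init_image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
    num_inference_steps=2,
).images[0]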
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
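For comparison, a self-contained Möbius sketch: mu(n) is 0 when a squared prime divides n, otherwise (-1)**k for k distinct prime factors. Note the snippet above counts prime factors with multiplicity via prime_factors, so it can differ from the textbook mu on inputs that are not square-free:

def mobius_ref(n: int) -> int:
    if n < 1:
        raise ValueError("Input must be a positive integer")
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # a squared prime divides n, so mu(n) = 0
                return 0
            result = -result
        p += 1
    return -result if n > 1 else result


assert [mobius_ref(k) for k in (1, 2, 3, 4, 6, 30)] == [1, -1, -1, 0, 1, -1]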
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
49
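The init files in this dump defer heavy imports until first attribute access. A generic stdlib sketch of that idea (this is not transformers' _LazyModule, just an illustration of the pattern):

import importlib


class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        # Only called when normal lookup fails, i.e. for module attributes;
        # the real import happens here, on first use.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)


json_lazy = LazyModule("json")
assert json_lazy.loads("[1, 2]") == [1, 2]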
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() A__ = logging.get_logger(__name__) def _lowerCamelCase ( a_ : str , a_ : str=False): lowerCamelCase :Optional[int] = [] # fmt: off # stem: rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''')) rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''')) rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''')) rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''')) # backbone rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''')) rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''')) rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''')) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", 
F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias")) # transformer encoder for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias")) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ]) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ]) # fmt: on return rename_keys def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False): for i in range(config.num_hidden_layers): if base_model: lowerCamelCase :Union[str, Any] = '''''' else: lowerCamelCase :Optional[int] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight") lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict lowerCamelCase :Any = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size] lowerCamelCase :int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase :Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase :Optional[Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :] def _lowerCamelCase ( a_ : int): lowerCamelCase :Any = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a_ , a_) def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple): lowerCamelCase 
:Optional[Any] = dct.pop(a_) lowerCamelCase :str = val def _lowerCamelCase ( ): lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw) return im @torch.no_grad() def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False): lowerCamelCase :Optional[int] = BitConfig( global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , ) lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00) lowerCamelCase :List[Any] = False # load original model from timm lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCamelCase :List[str] = timm_model.state_dict() if base_model: remove_classification_head_(a_) lowerCamelCase :Tuple = create_rename_keys(a_ , a_) for src, dest in rename_keys: rename_key(a_ , a_ , a_) read_in_q_k_v(a_ , a_ , a_) lowerCamelCase :List[str] = '''huggingface/label-files''' lowerCamelCase :Any = '''imagenet-1k-id2label.json''' lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r''')) lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()} lowerCamelCase :Optional[int] = idalabel lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval() else: lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval() model.load_state_dict(a_) # create image processor lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_)) lowerCamelCase :str = transform.transforms lowerCamelCase :int = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } lowerCamelCase :Any = ViTHybridImageProcessor( do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowerCamelCase :Dict = prepare_img() lowerCamelCase :str = transform(a_).unsqueeze(0) lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values # verify pixel values assert torch.allclose(a_ , a_) # verify logits with torch.no_grad(): lowerCamelCase :Optional[int] = model(a_) lowerCamelCase :Union[str, Any] = outputs.logits print('''Predicted class:''' , logits.argmax(-1).item()) if base_model: lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3) else: lowerCamelCase :List[str] = timm_model(a_) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(a_ , outputs.logits , atol=1e-3) print('''Looks ok!''') if pytorch_dump_folder_path is not None: Path(a_).mkdir(exist_ok=a_) print(F"Saving model {vit_name} to {pytorch_dump_folder_path}") model.save_pretrained(a_) print(F"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(a_) if push_to_hub: print(F"Pushing model and processor to the hub {vit_name}") 
model.push_to_hub(F"ybelkada/{vit_name}") processor.push_to_hub(F"ybelkada/{vit_name}") if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) A__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
49
1
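The read_in_q_k_v helper above slices timm's fused qkv projection into separate query/key/value tensors; a self-contained sketch of that split:

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] rows
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]

# Concatenating the slices must reproduce the fused tensor exactly.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)
assert in_proj_bias[:hidden_size].shape == (hidden_size,)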
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__ = logging.get_logger(__name__) A__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} A__ = { """vocab_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), }, """merges_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), }, } A__ = { """allenai/longformer-base-4096""": 4_096, """allenai/longformer-large-4096""": 4_096, """allenai/longformer-large-4096-finetuned-triviaqa""": 4_096, """allenai/longformer-base-4096-extra.pos.embd.only""": 4_096, """allenai/longformer-large-4096-extra.pos.embd.only""": 4_096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _lowerCamelCase ( ): lowerCamelCase :Optional[Any] = ( list(range(ord('''!''') , ord('''~''') + 1)) + list(range(ord('''¡''') , ord('''¬''') + 1)) + list(range(ord('''®''') , ord('''ÿ''') + 1)) ) lowerCamelCase :Union[str, Any] = bs[:] lowerCamelCase :List[str] = 0 for b in range(2**8): if b not in bs: bs.append(a_) cs.append(2**8 + n) n += 1 lowerCamelCase :Union[str, Any] = [chr(a_) for n in cs] return dict(zip(a_ , a_)) def _lowerCamelCase ( a_ : List[Any]): lowerCamelCase :Optional[Any] = set() lowerCamelCase :str = word[0] for char in word[1:]: pairs.add((prev_char, char)) lowerCamelCase :Union[str, Any] = char return pairs class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ['input_ids', 'attention_mask'] def __init__( self : Optional[int] , __snake_case : Optional[int] , __snake_case : Any , __snake_case : Dict="replace" , __snake_case : List[Any]="<s>" , __snake_case : Optional[int]="</s>" , __snake_case : int="</s>" , __snake_case : Any="<s>" , __snake_case : List[str]="<unk>" , __snake_case : Optional[Any]="<pad>" , __snake_case : Optional[Any]="<mask>" , __snake_case : Union[str, Any]=False , **__snake_case : int , ): lowerCamelCase 
:int = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else bos_token lowerCamelCase :Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else eos_token lowerCamelCase :str = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else sep_token lowerCamelCase :Any = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else cls_token lowerCamelCase :Optional[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else unk_token lowerCamelCase :Optional[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase :Any = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token super().__init__( errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , ) with open(__snake_case , encoding='''utf-8''' ) as vocab_handle: lowerCamelCase :Tuple = json.load(__snake_case ) lowerCamelCase :int = {v: k for k, v in self.encoder.items()} lowerCamelCase :Tuple = errors # how to handle errors in decoding lowerCamelCase :List[str] = bytes_to_unicode() lowerCamelCase :List[str] = {v: k for k, v in self.byte_encoder.items()} with open(__snake_case , encoding='''utf-8''' ) as merges_handle: lowerCamelCase :Tuple = merges_handle.read().split('''\n''' )[1:-1] lowerCamelCase :Dict = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase :int = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) lowerCamelCase :Dict = {} lowerCamelCase :Optional[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase :List[str] = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def snake_case ( self : Any ): return len(self.encoder ) def snake_case ( self : Any ): return dict(self.encoder , **self.added_tokens_encoder ) def snake_case ( self : List[str] , __snake_case : Tuple ): if token in self.cache: return self.cache[token] lowerCamelCase :Any = tuple(__snake_case ) lowerCamelCase :List[Any] = get_pairs(__snake_case ) if not pairs: return token while True: lowerCamelCase :List[Any] = min(__snake_case , key=lambda __snake_case : self.bpe_ranks.get(__snake_case , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase , lowerCamelCase :Any = bigram lowerCamelCase :int = [] lowerCamelCase :Tuple = 0 while i < len(__snake_case ): try: lowerCamelCase :Optional[int] = word.index(__snake_case , __snake_case ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase :List[Any] = j if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase :Optional[int] = tuple(__snake_case ) lowerCamelCase :List[Any] = new_word if len(__snake_case ) == 1: break else: lowerCamelCase :int = 
get_pairs(__snake_case ) lowerCamelCase :Tuple = ''' '''.join(__snake_case ) lowerCamelCase :List[str] = word return word def snake_case ( self : List[str] , __snake_case : str ): lowerCamelCase :Optional[int] = [] for token in re.findall(self.pat , __snake_case ): lowerCamelCase :Optional[Any] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__snake_case ).split(''' ''' ) ) return bpe_tokens def snake_case ( self : Optional[int] , __snake_case : Tuple ): return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) ) def snake_case ( self : str , __snake_case : List[Any] ): return self.decoder.get(__snake_case ) def snake_case ( self : Any , __snake_case : Any ): lowerCamelCase :str = ''''''.join(__snake_case ) lowerCamelCase :int = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def snake_case ( self : List[str] , __snake_case : str , __snake_case : Optional[str] = None ): if not os.path.isdir(__snake_case ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase :int = os.path.join( __snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase :List[Any] = os.path.join( __snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__snake_case , ensure_ascii=__snake_case ) + '''\n''' ) lowerCamelCase :Optional[int] = 0 with open(__snake_case , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __snake_case : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." 
''' Please check that the tokenizer is not corrupted!''' ) lowerCamelCase :Optional[int] = token_index writer.write(''' '''.join(__snake_case ) + '''\n''' ) index += 1 return vocab_file, merge_file def snake_case ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase :Tuple = [self.cls_token_id] lowerCamelCase :int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) if token_ids_a is None: return [1] + ([0] * len(__snake_case )) + [1] return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1] def snake_case ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ): lowerCamelCase :Optional[int] = [self.sep_token_id] lowerCamelCase :Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self : Tuple , __snake_case : Optional[Any] , __snake_case : Tuple=False , **__snake_case : Any ): lowerCamelCase :List[str] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__snake_case ) > 0 and not text[0].isspace()): lowerCamelCase :int = ''' ''' + text return (text, kwargs)
49
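The BPE loop above repeatedly merges the lowest-ranked adjacent symbol pair; a toy illustration of one selection step (the merge table is made up for the example):

word = ("l", "o", "w", "e", "r")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
bpe_ranks = {("l", "o"): 0, ("o", "w"): 1, ("e", "r"): 2}  # toy merge table

# The pair with the lowest rank merges first; unseen pairs rank as +inf.
best = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
assert best == ("l", "o")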
def solution(n: int = 4_000_000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
49
1
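A quick cross-check of the even-Fibonacci sum above: the even terms are every third Fibonacci number and satisfy E(n) = 4*E(n-1) + E(n-2), which gives the known answer directly:

def even_fib_sum(limit: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total


assert even_fib_sum() == 4_613_732  # agrees with the snippet above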
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = (EulerDiscreteScheduler,) _UpperCAmelCase = 1_0 def snake_case ( self : Dict , **__snake_case : Tuple ): lowerCamelCase :List[Any] = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**__snake_case ) return config def snake_case ( self : Any ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__snake_case ) def snake_case ( self : List[str] ): for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=__snake_case , beta_end=__snake_case ) def snake_case ( self : Dict ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__snake_case ) def snake_case ( self : int ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__snake_case ) def snake_case ( self : List[Any] ): lowerCamelCase :int = self.scheduler_classes[0] lowerCamelCase :List[Any] = self.get_scheduler_config() lowerCamelCase :List[str] = scheduler_class(**__snake_case ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase :Optional[Any] = torch.manual_seed(0 ) lowerCamelCase :List[str] = self.dummy_model() lowerCamelCase :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase :List[str] = sample.to(__snake_case ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase :Optional[int] = scheduler.scale_model_input(__snake_case , __snake_case ) lowerCamelCase :Dict = model(__snake_case , __snake_case ) lowerCamelCase :List[str] = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ) lowerCamelCase :Any = output.prev_sample lowerCamelCase :Optional[int] = torch.sum(torch.abs(__snake_case ) ) lowerCamelCase :Tuple = torch.mean(torch.abs(__snake_case ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def snake_case ( self : str ): lowerCamelCase :Optional[Any] = self.scheduler_classes[0] lowerCamelCase :List[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCamelCase :Dict = scheduler_class(**__snake_case ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase :Optional[int] = torch.manual_seed(0 ) lowerCamelCase :Union[str, Any] = self.dummy_model() lowerCamelCase :str = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase :int = sample.to(__snake_case ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase :int = scheduler.scale_model_input(__snake_case , __snake_case ) lowerCamelCase :List[str] = model(__snake_case , __snake_case ) lowerCamelCase :Optional[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ) lowerCamelCase :str = output.prev_sample lowerCamelCase :List[Any] = torch.sum(torch.abs(__snake_case ) ) lowerCamelCase :str = torch.mean(torch.abs(__snake_case ) ) assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2 assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3 def snake_case ( self : Optional[int] ): lowerCamelCase :Optional[int] = self.scheduler_classes[0] lowerCamelCase :Any = self.get_scheduler_config() lowerCamelCase :List[str] = scheduler_class(**__snake_case ) scheduler.set_timesteps(self.num_inference_steps , 
device=__snake_case ) lowerCamelCase :Union[str, Any] = torch.manual_seed(0 ) lowerCamelCase :str = self.dummy_model() lowerCamelCase :int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() lowerCamelCase :List[Any] = sample.to(__snake_case ) for t in scheduler.timesteps: lowerCamelCase :Union[str, Any] = scheduler.scale_model_input(__snake_case , __snake_case ) lowerCamelCase :int = model(__snake_case , __snake_case ) lowerCamelCase :Any = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ) lowerCamelCase :Tuple = output.prev_sample lowerCamelCase :List[str] = torch.sum(torch.abs(__snake_case ) ) lowerCamelCase :Tuple = torch.mean(torch.abs(__snake_case ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def snake_case ( self : int ): lowerCamelCase :Union[str, Any] = self.scheduler_classes[0] lowerCamelCase :Tuple = self.get_scheduler_config() lowerCamelCase :List[str] = scheduler_class(**__snake_case , use_karras_sigmas=__snake_case ) scheduler.set_timesteps(self.num_inference_steps , device=__snake_case ) lowerCamelCase :int = torch.manual_seed(0 ) lowerCamelCase :List[str] = self.dummy_model() lowerCamelCase :Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() lowerCamelCase :Optional[int] = sample.to(__snake_case ) for t in scheduler.timesteps: lowerCamelCase :str = scheduler.scale_model_input(__snake_case , __snake_case ) lowerCamelCase :Optional[int] = model(__snake_case , __snake_case ) lowerCamelCase :int = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ) lowerCamelCase :Optional[Any] = output.prev_sample lowerCamelCase :str = torch.sum(torch.abs(__snake_case ) ) lowerCamelCase :List[str] = torch.mean(torch.abs(__snake_case ) ) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
49
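A hedged sketch of the sampling loop these scheduler tests exercise, with a random tensor standing in for the UNet output (default beta settings are assumed):

import torch

from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
generator = torch.manual_seed(0)

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.randn_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample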
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
49
1
from collections import defaultdict


def dfs(start: int) -> int:
    # Size of the subtree rooted at `start`; a node whose subtree has even
    # size marks an edge to its parent that can be cut.
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # The root's own even-sized subtree is not a cuttable edge, hence the -1.
    print(len(cuts) - 1)
49
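An independent check of the even-tree snippet above on its sample edges (expected printed answer: 2):

from collections import defaultdict


def removable_edges(edges, root=1):
    adj = defaultdict(list)
    for u, v in edges:
        adj[u].append(v)
        adj[v].append(u)
    even = 0

    def subtree_size(node, parent):
        nonlocal even
        size = 1 + sum(subtree_size(child, node) for child in adj[node] if child != parent)
        if size % 2 == 0 and node != root:
            even += 1  # the edge from `node` to its parent can be cut
        return size

    subtree_size(root, 0)
    return even


edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
assert removable_edges(edges) == 2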
import numpy class _lowerCAmelCase : def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ): lowerCamelCase :Dict = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. lowerCamelCase :Dict = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. lowerCamelCase :Dict = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. lowerCamelCase :Any = numpy.random.rand(3 , 1 ) # Real output values provided. lowerCamelCase :Union[str, Any] = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. lowerCamelCase :List[str] = numpy.zeros(output_array.shape ) def snake_case ( self : Optional[int] ): lowerCamelCase :Any = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. lowerCamelCase :Any = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. lowerCamelCase :Dict = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def snake_case ( self : Any ): lowerCamelCase :Union[str, Any] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) lowerCamelCase :Dict = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) lowerCamelCase :int = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ): for iteration in range(1 , iterations + 1 ): lowerCamelCase :Union[str, Any] = self.feedforward() self.back_propagation() if give_loss: 
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F"Iteration {iteration} Loss: {loss}" ) def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ): lowerCamelCase :int = input_arr lowerCamelCase :Union[str, Any] = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) lowerCamelCase :Optional[Any] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) lowerCamelCase :Optional[int] = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _lowerCamelCase ( a_ : numpy.ndarray): return 1 / (1 + numpy.exp(-value)) def _lowerCamelCase ( a_ : numpy.ndarray): return (value) * (1 - (value)) def _lowerCamelCase ( ): lowerCamelCase :Optional[Any] = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa) # Calling neural network class. lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork( input_array=a_ , output_array=a_) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=a_ , iterations=10 , give_loss=a_) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa)) if __name__ == "__main__": example()
49
1
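The backpropagation above relies on sigmoid'(y) = y * (1 - y), where y is the activation itself rather than the pre-activation; a finite-difference check of that identity:

import numpy


def sigmoid(x):
    return 1 / (1 + numpy.exp(-x))


x = 0.3
y = sigmoid(x)
analytic = y * (1 - y)
numeric = (sigmoid(x + 1e-6) - sigmoid(x - 1e-6)) / 2e-6  # central difference
assert abs(analytic - numeric) < 1e-8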
import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A__ = pd.read_csv("""sample_data.csv""", header=None) A__ = df.shape[:1][0] # If you're using some other dataset input the target column A__ = df.iloc[:, 1:2] A__ = actual_data.values.reshape(len_data, 1) A__ = MinMaxScaler().fit_transform(actual_data) A__ = 10 A__ = 5 A__ = 20 A__ = len_data - periods * look_back A__ = actual_data[:division] A__ = actual_data[division - look_back :] A__ , A__ = [], [] A__ , A__ = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A__ = np.array(train_x) A__ = np.array(test_x) A__ = np.array([list(i.ravel()) for i in train_y]) A__ = np.array([list(i.ravel()) for i in test_y]) A__ = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") A__ = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) A__ = model.predict(x_test)
49
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
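A worked example for the abbreviation DP above, using the abbr function defined there: "daBcd" matches "ABC" by capitalizing 'a' and 'c' and deleting both lowercase 'd's, while "aB" can never reach the longer target "AAB" because only deletion and capitalization are allowed:

assert abbr("daBcd", "ABC") is True
assert abbr("aB", "AAB") is False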
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 42 class _lowerCAmelCase ( nn.Module ): def __init__( self : Optional[int] , __snake_case : Optional[int]=3 , __snake_case : Optional[int]=3 , __snake_case : Optional[Any]=("DownEncoderBlock2D",) , __snake_case : Tuple=(64,) , __snake_case : Optional[int]=2 , __snake_case : List[Any]=32 , __snake_case : Dict="silu" , __snake_case : int=True , ): super().__init__() lowerCamelCase :Optional[Any] = layers_per_block lowerCamelCase :List[str] = torch.nn.Convad( __snake_case , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) lowerCamelCase :Optional[int] = None lowerCamelCase :str = nn.ModuleList([] ) # down lowerCamelCase :Tuple = block_out_channels[0] for i, down_block_type in enumerate(__snake_case ): lowerCamelCase :Union[str, Any] = output_channel lowerCamelCase :Tuple = block_out_channels[i] lowerCamelCase :Dict = i == len(__snake_case ) - 1 lowerCamelCase :int = get_down_block( __snake_case , num_layers=self.layers_per_block , in_channels=__snake_case , out_channels=__snake_case , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__snake_case , resnet_groups=__snake_case , attention_head_dim=__snake_case , temb_channels=__snake_case , ) self.down_blocks.append(__snake_case ) # mid lowerCamelCase :List[Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__snake_case , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=__snake_case , temb_channels=__snake_case , ) # out lowerCamelCase :Optional[int] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__snake_case , eps=1e-6 ) lowerCamelCase :Union[str, Any] = nn.SiLU() lowerCamelCase :Any = 2 * out_channels if double_z else out_channels lowerCamelCase :Optional[int] = nn.Convad(block_out_channels[-1] , __snake_case , 3 , padding=1 ) lowerCamelCase :str = False def snake_case ( self : str , __snake_case : Optional[Any] ): lowerCamelCase :Optional[int] = x lowerCamelCase :Optional[Any] = self.conv_in(__snake_case ) if self.training and self.gradient_checkpointing: def create_custom_forward(__snake_case : Optional[int] ): def custom_forward(*__snake_case : Union[str, Any] ): return module(*__snake_case ) return custom_forward # down if is_torch_version('''>=''' , '''1.11.0''' ): for down_block in self.down_blocks: lowerCamelCase :List[str] = torch.utils.checkpoint.checkpoint( create_custom_forward(__snake_case ) , __snake_case , use_reentrant=__snake_case ) # middle lowerCamelCase :int = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __snake_case , use_reentrant=__snake_case ) else: for down_block in self.down_blocks: lowerCamelCase :Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(__snake_case ) , __snake_case ) # middle lowerCamelCase :Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __snake_case ) else: # down for down_block in self.down_blocks: lowerCamelCase :Optional[Any] = down_block(__snake_case ) # middle lowerCamelCase :List[str] = self.mid_block(__snake_case ) # post-process lowerCamelCase :Tuple = 
self.conv_norm_out(__snake_case ) lowerCamelCase :List[str] = self.conv_act(__snake_case ) lowerCamelCase :Tuple = self.conv_out(__snake_case ) return sample class _lowerCAmelCase ( nn.Module ): def __init__( self : List[str] , __snake_case : List[Any]=3 , __snake_case : Tuple=3 , __snake_case : Optional[int]=("UpDecoderBlock2D",) , __snake_case : Tuple=(64,) , __snake_case : str=2 , __snake_case : Optional[int]=32 , __snake_case : List[str]="silu" , __snake_case : List[Any]="group" , ): super().__init__() lowerCamelCase :str = layers_per_block lowerCamelCase :Tuple = nn.Convad( __snake_case , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) lowerCamelCase :Union[str, Any] = None lowerCamelCase :Tuple = nn.ModuleList([] ) lowerCamelCase :str = in_channels if norm_type == '''spatial''' else None # mid lowerCamelCase :str = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__snake_case , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__snake_case , temb_channels=__snake_case , ) # up lowerCamelCase :Union[str, Any] = list(reversed(__snake_case ) ) lowerCamelCase :Optional[Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(__snake_case ): lowerCamelCase :Optional[Any] = output_channel lowerCamelCase :str = reversed_block_out_channels[i] lowerCamelCase :int = i == len(__snake_case ) - 1 lowerCamelCase :List[Any] = get_up_block( __snake_case , num_layers=self.layers_per_block + 1 , in_channels=__snake_case , out_channels=__snake_case , prev_output_channel=__snake_case , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__snake_case , resnet_groups=__snake_case , attention_head_dim=__snake_case , temb_channels=__snake_case , resnet_time_scale_shift=__snake_case , ) self.up_blocks.append(__snake_case ) lowerCamelCase :Optional[Any] = output_channel # out if norm_type == "spatial": lowerCamelCase :Union[str, Any] = SpatialNorm(block_out_channels[0] , __snake_case ) else: lowerCamelCase :str = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__snake_case , eps=1e-6 ) lowerCamelCase :Any = nn.SiLU() lowerCamelCase :Optional[int] = nn.Convad(block_out_channels[0] , __snake_case , 3 , padding=1 ) lowerCamelCase :str = False def snake_case ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any]=None ): lowerCamelCase :str = z lowerCamelCase :List[Any] = self.conv_in(__snake_case ) lowerCamelCase :int = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__snake_case : Any ): def custom_forward(*__snake_case : Optional[int] ): return module(*__snake_case ) return custom_forward if is_torch_version('''>=''' , '''1.11.0''' ): # middle lowerCamelCase :int = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __snake_case , __snake_case , use_reentrant=__snake_case ) lowerCamelCase :Optional[int] = sample.to(__snake_case ) # up for up_block in self.up_blocks: lowerCamelCase :List[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(__snake_case ) , __snake_case , __snake_case , use_reentrant=__snake_case ) else: # middle lowerCamelCase :str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __snake_case , __snake_case ) lowerCamelCase :Tuple = sample.to(__snake_case ) # up for up_block in self.up_blocks: lowerCamelCase :str = 
torch.utils.checkpoint.checkpoint(create_custom_forward(__snake_case ) , __snake_case , __snake_case ) else: # middle lowerCamelCase :str = self.mid_block(__snake_case , __snake_case ) lowerCamelCase :Optional[int] = sample.to(__snake_case ) # up for up_block in self.up_blocks: lowerCamelCase :Optional[Any] = up_block(__snake_case , __snake_case ) # post-process if latent_embeds is None: lowerCamelCase :Optional[int] = self.conv_norm_out(__snake_case ) else: lowerCamelCase :List[Any] = self.conv_norm_out(__snake_case , __snake_case ) lowerCamelCase :Tuple = self.conv_act(__snake_case ) lowerCamelCase :Union[str, Any] = self.conv_out(__snake_case ) return sample class _lowerCAmelCase ( nn.Module ): def __init__( self : Optional[int] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any]=None , __snake_case : Optional[Any]="random" , __snake_case : Any=False , __snake_case : int=True ): super().__init__() lowerCamelCase :List[str] = n_e lowerCamelCase :Dict = vq_embed_dim lowerCamelCase :int = beta lowerCamelCase :Optional[int] = legacy lowerCamelCase :Tuple = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) lowerCamelCase :Dict = remap if self.remap is not None: self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) ) lowerCamelCase :Tuple = self.used.shape[0] lowerCamelCase :Tuple = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": lowerCamelCase :List[Any] = self.re_embed lowerCamelCase :str = self.re_embed + 1 print( F"Remapping {self.n_e} indices to {self.re_embed} indices. " F"Using {self.unknown_index} for unknown indices." ) else: lowerCamelCase :Dict = n_e lowerCamelCase :Optional[Any] = sane_index_shape def snake_case ( self : Optional[Any] , __snake_case : Optional[Any] ): lowerCamelCase :Dict = inds.shape assert len(__snake_case ) > 1 lowerCamelCase :List[str] = inds.reshape(ishape[0] , -1 ) lowerCamelCase :List[Any] = self.used.to(__snake_case ) lowerCamelCase :Any = (inds[:, :, None] == used[None, None, ...]).long() lowerCamelCase :int = match.argmax(-1 ) lowerCamelCase :Any = match.sum(2 ) < 1 if self.unknown_index == "random": lowerCamelCase :Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: lowerCamelCase :List[Any] = self.unknown_index return new.reshape(__snake_case ) def snake_case ( self : Any , __snake_case : Optional[Any] ): lowerCamelCase :Tuple = inds.shape assert len(__snake_case ) > 1 lowerCamelCase :Optional[Any] = inds.reshape(ishape[0] , -1 ) lowerCamelCase :List[Any] = self.used.to(__snake_case ) if self.re_embed > self.used.shape[0]: # extra token lowerCamelCase :Union[str, Any] = 0 # simply set to zero lowerCamelCase :Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __snake_case ) return back.reshape(__snake_case ) def snake_case ( self : Any , __snake_case : Dict ): # reshape z -> (batch, height, width, channel) and flatten lowerCamelCase :Any = z.permute(0 , 2 , 3 , 1 ).contiguous() lowerCamelCase :Optional[int] = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z lowerCamelCase :int = torch.argmin(torch.cdist(__snake_case , self.embedding.weight ) , dim=1 ) lowerCamelCase :List[str] = self.embedding(__snake_case ).view(z.shape ) lowerCamelCase :Optional[int] = None lowerCamelCase :Tuple = None # compute loss for embedding if not self.legacy: lowerCamelCase 
:Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: lowerCamelCase :Tuple = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients lowerCamelCase :List[Any] = z + (z_q - z).detach() # reshape back to match original input shape lowerCamelCase :Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: lowerCamelCase :Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis lowerCamelCase :int = self.remap_to_used(__snake_case ) lowerCamelCase :Union[str, Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: lowerCamelCase :Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def snake_case ( self : str , __snake_case : List[Any] , __snake_case : Union[str, Any] ): # shape specifying (batch, height, width, channel) if self.remap is not None: lowerCamelCase :Optional[int] = indices.reshape(shape[0] , -1 ) # add batch axis lowerCamelCase :Tuple = self.unmap_to_all(__snake_case ) lowerCamelCase :List[Any] = indices.reshape(-1 ) # flatten again # get quantized latent vectors lowerCamelCase :Union[str, Any] = self.embedding(__snake_case ) if shape is not None: lowerCamelCase :Optional[Any] = z_q.view(__snake_case ) # reshape back to match original input shape lowerCamelCase :Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : str=False ): lowerCamelCase :Any = parameters lowerCamelCase , lowerCamelCase :List[str] = torch.chunk(__snake_case , 2 , dim=1 ) lowerCamelCase :List[Any] = torch.clamp(self.logvar , -3_0.0 , 2_0.0 ) lowerCamelCase :str = deterministic lowerCamelCase :Dict = torch.exp(0.5 * self.logvar ) lowerCamelCase :Optional[int] = torch.exp(self.logvar ) if self.deterministic: lowerCamelCase :Optional[int] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def snake_case ( self : str , __snake_case : Optional[torch.Generator] = None ): # make sure sample is on the same device as the parameters and has same dtype lowerCamelCase :Tuple = randn_tensor( self.mean.shape , generator=__snake_case , device=self.parameters.device , dtype=self.parameters.dtype ) lowerCamelCase :str = self.mean + self.std * sample return x def snake_case ( self : List[str] , __snake_case : Optional[Any]=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def snake_case ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : Optional[Any]=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) lowerCamelCase :List[str] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__snake_case ) def snake_case ( self : str ): return self.mean
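# Note: the `z + (z_q - z).detach()` line in the quantizer forward is the straight-through
# estimator — the forward pass returns the quantized values while gradients flow back to the
# encoder as if quantization were the identity. A self-contained sketch of that trick
# (illustrative only; independent of the renamed classes above):
#
#     import torch
#     z = torch.randn(4, 8, requires_grad=True)        # "encoder" output
#     codebook = torch.randn(16, 8)                    # 16 embeddings of dim 8
#     idx = torch.cdist(z, codebook).argmin(dim=1)     # nearest codebook entry per row
#     z_q = z + (codebook[idx] - z).detach()           # forward: codebook[idx]; backward: identity
#     z_q.sum().backward()
#     assert torch.allclose(z.grad, torch.ones_like(z))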
49
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ): lowerCamelCase :Optional[Any] = parent lowerCamelCase :List[Any] = batch_size lowerCamelCase :Any = image_size lowerCamelCase :Union[str, Any] = patch_size lowerCamelCase :Any = num_channels lowerCamelCase :List[Any] = is_training lowerCamelCase :Optional[Any] = use_labels lowerCamelCase :Any = hidden_size lowerCamelCase :List[Any] = num_hidden_layers lowerCamelCase :List[str] = num_attention_heads lowerCamelCase :Tuple = intermediate_size lowerCamelCase :List[str] = hidden_act lowerCamelCase :List[str] = hidden_dropout_prob lowerCamelCase :Any = attention_probs_dropout_prob lowerCamelCase :List[Any] = type_sequence_label_size lowerCamelCase :Optional[int] = initializer_range lowerCamelCase :List[Any] = num_labels lowerCamelCase :Any = scope lowerCamelCase :Union[str, Any] = n_targets lowerCamelCase :Optional[Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens def snake_case ( self : List[str] ): lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowerCamelCase :List[str] = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowerCamelCase :Optional[int] = [] for i in range(self.batch_size ): lowerCamelCase :List[str] = {} lowerCamelCase :Tuple = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=__snake_case ) lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case ) labels.append(__snake_case ) lowerCamelCase :str = self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] ): return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ): lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :Union[str, Any] = model(__snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ): lowerCamelCase :int = YolosForObjectDetection(__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :str = model(pixel_values=__snake_case ) lowerCamelCase :Any = model(__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def snake_case ( self : int ): lowerCamelCase :List[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () _UpperCAmelCase = ( {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ): lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowerCamelCase :Dict = [] for i in range(self.model_tester.batch_size ): lowerCamelCase :Optional[Any] = {} lowerCamelCase :List[Any] = torch.ones( size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long ) lowerCamelCase :str = torch.ones( self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float ) labels.append(__snake_case ) lowerCamelCase :Union[str, Any] = labels return inputs_dict def snake_case ( self : Tuple ): lowerCamelCase :Union[str, Any] = YolosModelTester(self ) lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 ) def snake_case ( self : Union[str, Any] ): self.config_tester.run_common_tests() def snake_case ( self : Optional[Any] ): # YOLOS does not use inputs_embeds pass def snake_case ( self : Tuple ): lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :Optional[int] = model_class(__snake_case ) 
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase :str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) ) def snake_case ( self : str ): lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :str = model_class(__snake_case ) lowerCamelCase :Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase :Tuple = [*signature.parameters.keys()] lowerCamelCase :Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __snake_case ) def snake_case ( self : int ): lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def snake_case ( self : str ): lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase :int = True # in YOLOS, the seq_len is different lowerCamelCase :str = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowerCamelCase :str = True lowerCamelCase :Tuple = False lowerCamelCase :Optional[int] = True lowerCamelCase :int = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :str = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase :Optional[Any] = True lowerCamelCase :str = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Tuple = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowerCamelCase :Optional[int] = len(__snake_case ) # Check attention is always last and order is fine lowerCamelCase :Union[str, Any] = True lowerCamelCase :List[Any] = True lowerCamelCase :Tuple = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Dict = 1 self.assertEqual(out_len + added_hidden_states , len(__snake_case ) ) lowerCamelCase :Dict = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def snake_case ( self : List[str] ): def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ): lowerCamelCase :Union[str, Any] = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Optional[Any] = outputs.hidden_states lowerCamelCase :Any = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__snake_case ) , __snake_case ) # YOLOS has a different seq_length lowerCamelCase :List[str] = self.model_tester.expected_seq_len self.assertListEqual( 
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :Union[str, Any] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase :Any = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*__snake_case ) @slow def snake_case ( self : Dict ): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def _lowerCamelCase ( ): lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): @cached_property def snake_case ( self : Tuple ): return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None @slow def snake_case ( self : Dict ): lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case ) lowerCamelCase :Optional[Any] = self.default_image_processor lowerCamelCase :str = prepare_img() lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case ) # forward pass with torch.no_grad(): lowerCamelCase :Optional[Any] = model(inputs.pixel_values ) # verify outputs lowerCamelCase :int = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , __snake_case ) lowerCamelCase :Any = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , ) lowerCamelCase :Any = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) ) # verify postprocessing lowerCamelCase :List[str] = image_processor.post_process_object_detection( __snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case ) lowerCamelCase :str = [75, 75, 17, 63, 17] lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case ) self.assertEqual(len(results['''scores'''] ) , 5 ) self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) ) self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case ) self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
49
1
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP A__ = False try: A__ = _is_package_available("""google.colab""") except ModuleNotFoundError: pass @input.register class _lowerCAmelCase : def __init__( self : int , __snake_case : str = None , __snake_case : list = [] ): lowerCamelCase :Optional[Any] = 0 lowerCamelCase :Any = choices lowerCamelCase :List[Any] = prompt if sys.platform == "win32": lowerCamelCase :Optional[Any] = '''*''' else: lowerCamelCase :List[Any] = '''➔ ''' def snake_case ( self : int , __snake_case : Optional[Any] , __snake_case : str = "" ): if sys.platform != "win32": writeColor(self.choices[index] , 32 , __snake_case ) else: forceWrite(self.choices[index] , __snake_case ) def snake_case ( self : Union[str, Any] , __snake_case : int ): if index == self.position: forceWrite(F" {self.arrow_char} " ) self.write_choice(__snake_case ) else: forceWrite(F" {self.choices[index]}" ) reset_cursor() def snake_case ( self : Union[str, Any] , __snake_case : Direction , __snake_case : int = 1 ): lowerCamelCase :int = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(__snake_case ) move_cursor(__snake_case , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP['''up'''] ) def snake_case ( self : Dict ): self.move_direction(Direction.UP ) @input.mark(KEYMAP['''down'''] ) def snake_case ( self : List[str] ): self.move_direction(Direction.DOWN ) @input.mark(KEYMAP['''newline'''] ) def snake_case ( self : List[str] ): move_cursor(len(self.choices ) - self.position , '''DOWN''' ) return self.position @input.mark(KEYMAP['''interrupt'''] ) def snake_case ( self : List[str] ): move_cursor(len(self.choices ) - self.position , '''DOWN''' ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(__snake_case )] for number in range(10 )] ) def snake_case ( self : str ): lowerCamelCase :Dict = int(chr(self.current_selection ) ) lowerCamelCase :Union[str, Any] = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , __snake_case ) else: return else: return def snake_case ( self : int , __snake_case : int = 0 ): if self.prompt: linebreak() forceWrite(self.prompt , '''\n''' ) if in_colab: forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' ) else: forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' ) lowerCamelCase :Tuple = default_choice for i in range(len(self.choices ) ): self.print_choice(__snake_case ) forceWrite('''\n''' ) move_cursor(len(self.choices ) - self.position , '''UP''' ) with cursor.hide(): while True: if in_colab: try: lowerCamelCase :List[Any] = int(builtins.input() ) except ValueError: lowerCamelCase :List[Any] = default_choice else: lowerCamelCase :Optional[int] = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , '''UP''' ) clear_line() self.write_choice(__snake_case , '''\n''' ) return choice
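# Usage note: the final `snake_case(self, default_choice=0)` method is the menu's run loop —
# it prints the choices, tracks arrow/number keys through the `input`/`cursor` helpers above,
# and returns the selected index. A hedged sketch, assuming this class corresponds to the
# upstream accelerate `BulletMenu`:
#     choice = BulletMenu("Pick a backend:", ["cpu", "cuda", "mps"]).run(default_choice=0)
# In Colab (`in_colab` True) raw key handling is unavailable, so the loop falls back to a
# plain integer prompt via `builtins.input()`.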
49
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Tuple ): lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils ) lowerCamelCase :Dict = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 lowerCamelCase :Any = test_metrics @require_cpu def snake_case ( self : Dict ): debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def snake_case ( self : int ): debug_launcher(self.test_metrics.main ) @require_single_gpu def snake_case ( self : Any ): self.test_metrics.main() @require_multi_gpu def snake_case ( self : Optional[int] ): print(F"Found {torch.cuda.device_count()} devices." ) lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__snake_case , env=os.environ.copy() )
49
1
def _lowerCamelCase ( a_ : str , a_ : Optional[int]): return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def _lowerCamelCase ( a_ : Tuple , a_ : List[Any]=0): return sorted(a_ , key=lambda a_: x[column]) def _lowerCamelCase ( a_ : List[str] , a_ : str , a_ : Union[str, Any]=float('''inf''')): for i in range(points_counts - 1): for j in range(i + 1 , a_): lowerCamelCase :str = euclidean_distance_sqr(points[i] , points[j]) if current_dis < min_dis: lowerCamelCase :List[str] = current_dis return min_dis def _lowerCamelCase ( a_ : Optional[int] , a_ : List[Any] , a_ : Dict=float('''inf''')): for i in range(min(6 , points_counts - 1) , a_): for j in range(max(0 , i - 6) , a_): lowerCamelCase :List[Any] = euclidean_distance_sqr(points[i] , points[j]) if current_dis < min_dis: lowerCamelCase :Tuple = current_dis return min_dis def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[int]): # base case if points_counts <= 3: return dis_between_closest_pair(a_ , a_) # recursion lowerCamelCase :Any = points_counts // 2 lowerCamelCase :List[str] = closest_pair_of_points_sqr( a_ , points_sorted_on_y[:mid] , a_) lowerCamelCase :Any = closest_pair_of_points_sqr( a_ , points_sorted_on_y[mid:] , points_counts - mid) lowerCamelCase :Tuple = min(a_ , a_) lowerCamelCase :Tuple = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis: cross_strip.append(a_) lowerCamelCase :List[Any] = dis_between_closest_in_strip( a_ , len(a_) , a_) return min(a_ , a_) def _lowerCamelCase ( a_ : str , a_ : Union[str, Any]): lowerCamelCase :Optional[int] = column_based_sort(a_ , column=0) lowerCamelCase :List[Any] = column_based_sort(a_ , column=1) return ( closest_pair_of_points_sqr( a_ , a_ , a_) ) ** 0.5 if __name__ == "__main__": A__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print("""Distance:""", closest_pair_of_points(points, len(points)))
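# Note: divide-and-conquer closest pair — sort the points by x and by y once, recurse on the
# two halves, then scan a strip around the dividing line where each point needs only a
# constant number (~6) of neighbour checks; roughly O(n log n) overall versus the O(n^2)
# brute force used for the base case. For the sample points above the closest pair should be
# (2, 3) and (3, 4), so the program prints a distance of sqrt(2) ~= 1.4142.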
49
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = '' _UpperCAmelCase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _UpperCAmelCase = None # compression type in fsspec. ex: "gzip" _UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ): super().__init__(self , **__snake_case ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode lowerCamelCase :Optional[Any] = fsspec.open( __snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={ '''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459 '''trust_env''': True, # Enable reading proxy env variables. **(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] ) lowerCamelCase :Dict = ( self.compressed_name[: self.compressed_name.rindex('''.''' )] if '''.''' in self.compressed_name else self.compressed_name ) lowerCamelCase :List[str] = None @classmethod def snake_case ( cls : Any , __snake_case : Any ): # compressed file paths are always relative to the archive root return super()._strip_protocol(__snake_case ).lstrip('''/''' ) def snake_case ( self : Any ): if self.dir_cache is None: lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name} lowerCamelCase :Optional[Any] = {f['''name''']: f} def snake_case ( self : Union[str, Any] , __snake_case : str ): return self.file.open().read() def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ): lowerCamelCase :List[str] = self._strip_protocol(__snake_case ) if mode != "rb": raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" ) return self.file.open() class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'bz2' _UpperCAmelCase = 'bz2' _UpperCAmelCase = '.bz2' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'gzip' _UpperCAmelCase = 'gzip' _UpperCAmelCase = '.gz' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'lz4' _UpperCAmelCase = 'lz4' _UpperCAmelCase = '.lz4' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'xz' _UpperCAmelCase = 'xz' _UpperCAmelCase = '.xz' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'zstd' _UpperCAmelCase = 'zstd' _UpperCAmelCase = '.zst' def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ): super().__init__( fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File 
"/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 lowerCamelCase :Tuple = self.file.__enter__ class _lowerCAmelCase : def __init__( self : Dict , __snake_case : Tuple ): lowerCamelCase :Optional[int] = file_ def __enter__( self : Optional[int] ): self._file.__enter__() return self def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ): self._file.__exit__(*__snake_case , **__snake_case ) def __iter__( self : Optional[Any] ): return iter(self._file ) def snake_case ( self : List[Any] ): return next(self._file ) def __getattr__( self : Any , __snake_case : str ): return getattr(self._file , __snake_case ) def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ): return WrappedFile(_enter(*__snake_case , **__snake_case ) ) lowerCamelCase :Dict = fixed_enter
49
1
def _lowerCamelCase ( a_ : int): # noqa: E741 lowerCamelCase :List[Any] = len(a_) lowerCamelCase :List[str] = 0 lowerCamelCase :Union[str, Any] = [0] * n lowerCamelCase :Optional[int] = [False] * n lowerCamelCase :Optional[int] = [False] * n def dfs(a_ : List[str] , a_ : Dict , a_ : Union[str, Any] , a_ : List[str]): if parent == root: out_edge_count += 1 lowerCamelCase :List[Any] = True lowerCamelCase :Optional[Any] = at for to in l[at]: if to == parent: pass elif not visited[to]: lowerCamelCase :str = dfs(a_ , a_ , a_ , a_) lowerCamelCase :List[Any] = min(low[at] , low[to]) # AP found via bridge if at < low[to]: lowerCamelCase :Optional[int] = True # AP found via cycle if at == low[to]: lowerCamelCase :str = True else: lowerCamelCase :Any = min(low[at] , a_) return out_edge_count for i in range(a_): if not visited[i]: lowerCamelCase :Any = 0 lowerCamelCase :Dict = dfs(a_ , a_ , -1 , a_) lowerCamelCase :List[str] = out_edge_count > 1 for x in range(len(a_)): if is_art[x] is True: print(a_) # Adjacency list of graph A__ = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
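# Note: Tarjan-style articulation-point detection. A DFS computes low-link values; a
# non-root vertex is an articulation point when some child's low-link cannot climb above it
# (the bridge/cycle checks above), and a root is one when it has more than one DFS out-edge.
# For the sample adjacency list the articulation points are 2, 3 and 5, so those are the
# expected printed vertices.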
49
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = LEDTokenizer _UpperCAmelCase = LEDTokenizerFast _UpperCAmelCase = True def snake_case ( self : Any ): super().setUp() lowerCamelCase :Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCamelCase :int = {'''unk_token''': '''<unk>'''} lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__snake_case ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__snake_case ) ) def snake_case ( self : int , **__snake_case : int ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Dict , **__snake_case : Any ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ): return "lower newer", "lower newer" @cached_property def snake_case ( self : Any ): return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def snake_case ( self : int ): return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def snake_case ( self : str ): lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowerCamelCase :List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(__snake_case , __snake_case ) @require_torch def snake_case ( self : Tuple ): lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' ) self.assertIn('''input_ids''' , __snake_case ) self.assertIn('''attention_mask''' , __snake_case ) self.assertNotIn('''labels''' , __snake_case ) self.assertNotIn('''decoder_attention_mask''' , __snake_case ) @require_torch def snake_case ( self : Union[str, Any] ): 
lowerCamelCase :Union[str, Any] = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def snake_case ( self : List[Any] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Optional[Any] = tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def snake_case ( self : Optional[int] ): lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.'''] lowerCamelCase :Any = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' ) lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' ) lowerCamelCase :Optional[int] = inputs['''input_ids'''] lowerCamelCase :Any = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def snake_case ( self : Dict ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.'''] lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case ) lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']] lowerCamelCase :str = tokenizer.pad(__snake_case ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case ) def snake_case ( self : Tuple ): pass def snake_case ( self : Optional[int] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :int = '''A, <mask> AllenNLP sentence.''' lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case ) lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 
487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
49
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A__ = { """configuration_blenderbot_small""": [ """BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlenderbotSmallConfig""", """BlenderbotSmallOnnxConfig""", ], """tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""BlenderbotSmallTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlenderbotSmallForCausalLM""", """BlenderbotSmallForConditionalGeneration""", """BlenderbotSmallModel""", """BlenderbotSmallPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """TFBlenderbotSmallForConditionalGeneration""", """TFBlenderbotSmallModel""", """TFBlenderbotSmallPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """FlaxBlenderbotSmallForConditionalGeneration""", """FlaxBlenderbotSmallModel""", """FlaxBlenderbotSmallPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
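# Note: this is the standard transformers lazy-import module. The `_import_structure` dict
# maps submodules to their exports, each optional backend (tokenizers / torch / tf / flax)
# is probed via OptionalDependencyNotAvailable, and `_LazyModule` defers the real submodule
# imports until an attribute is first accessed, e.g.
#     from transformers import BlenderbotSmallConfig   # triggers the lazy submodule import
# The eager imports under TYPE_CHECKING exist only so static type checkers and IDEs can
# resolve the names.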
49
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) A__ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""LayoutLMv2FeatureExtractor"""] A__ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
49
1
def _lowerCamelCase ( a_ : Optional[Any] , a_ : int): print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''') for i in range(a_): for j in range(a_): if dist[i][j] != float('''inf'''): print(int(dist[i][j]) , end='''\t''') else: print('''INF''' , end='''\t''') print() def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int]): lowerCamelCase :str = [[float('''inf''') for _ in range(a_)] for _ in range(a_)] for i in range(a_): for j in range(a_): lowerCamelCase :Tuple = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(a_): # looping through rows of graph array for i in range(a_): # looping through columns of graph array for j in range(a_): if ( dist[i][k] != float('''inf''') and dist[k][j] != float('''inf''') and dist[i][k] + dist[k][j] < dist[i][j] ): lowerCamelCase :List[str] = dist[i][k] + dist[k][j] _print_dist(a_ , a_) return dist, v if __name__ == "__main__": A__ = int(input("""Enter number of vertices: """)) A__ = int(input("""Enter number of edges: """)) A__ = [[float("""inf""") for i in range(v)] for j in range(v)] for i in range(v): A__ = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print("""\nEdge """, i + 1) A__ = int(input("""Enter source:""")) A__ = int(input("""Enter destination:""")) A__ = float(input("""Enter weight:""")) A__ = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
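# Note: the triple loop above performs the Floyd-Warshall relaxation
#     dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
# for every intermediate vertex k, which is O(V^3) time and O(V^2) space; the float("inf")
# guards simply skip relaxations through unreachable vertices.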
49
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : @staticmethod def snake_case ( *__snake_case : str , **__snake_case : str ): pass @is_pipeline_test @require_vision class _lowerCAmelCase ( unittest.TestCase ): @require_torch def snake_case ( self : Union[str, Any] ): lowerCamelCase :Optional[int] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__snake_case ) , [ [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}], [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}], ] , ) lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], ] , ) @require_tf def snake_case ( self : Tuple ): lowerCamelCase :Tuple = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , ) lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 
0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], ] , ) @slow @require_torch def snake_case ( self : Any ): lowerCamelCase :str = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__snake_case ) , [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ] , ) lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case ( self : Optional[Any] ): lowerCamelCase :Union[str, Any] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__snake_case ) , [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ] , ) lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ], ] * 5 , )
49
1
import argparse import math import traceback import dateutil.parser as date_parser import requests def _lowerCamelCase ( a_ : Dict): lowerCamelCase :Dict = {} lowerCamelCase :Dict = job['''started_at'''] lowerCamelCase :List[str] = job['''completed_at'''] lowerCamelCase :int = date_parser.parse(a_) lowerCamelCase :List[Any] = date_parser.parse(a_) lowerCamelCase :int = round((end_datetime - start_datetime).total_seconds() / 60.0) lowerCamelCase :Any = start lowerCamelCase :Optional[Any] = end lowerCamelCase :List[str] = duration_in_min return job_info def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int]=None): lowerCamelCase :List[str] = None if token is not None: lowerCamelCase :List[Any] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"} lowerCamelCase :Dict = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" lowerCamelCase :int = requests.get(a_ , headers=a_).json() lowerCamelCase :Any = {} try: job_time.update({job['''name''']: extract_time_from_single_job(a_) for job in result['''jobs''']}) lowerCamelCase :int = math.ceil((result['''total_count'''] - 1_00) / 1_00) for i in range(a_): lowerCamelCase :List[Any] = requests.get(url + F"&page={i + 2}" , headers=a_).json() job_time.update({job['''name''']: extract_time_from_single_job(a_) for job in result['''jobs''']}) return job_time except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}") return {} if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") A__ = parser.parse_args() A__ = get_job_time(args.workflow_run_id) A__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F'{k}: {v["duration"]}')
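# Usage note: run as a CLI against a GitHub Actions run, e.g. (hypothetical filename):
#     python get_job_duration.py --workflow_run_id 1234567890
# The script pages through the jobs API 100 jobs at a time and prints per-job durations in
# minutes, longest first. `get_job_time` accepts an API token for private repos or higher
# rate limits, but the CLI entry point does not currently pass one.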
49
import operator as op def _lowerCamelCase ( a_ : Tuple): lowerCamelCase :int = [] lowerCamelCase :List[str] = lambda a_ , a_: int(x / y) # noqa: E731 integer division operation lowerCamelCase :Optional[int] = { '''^''': op.pow, '''*''': op.mul, '''/''': div, '''+''': op.add, '''-''': op.sub, } # operators & their respective operation # print table header print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''') print('''-''' * (30 + len(a_))) for x in post_fix: if x.isdigit(): # if x in digit stack.append(a_) # append x to stack # output in tabular format print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''') else: lowerCamelCase :Optional[Any] = stack.pop() # pop stack # output in tabular format print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''') lowerCamelCase :str = stack.pop() # pop stack # output in tabular format print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''') stack.append( str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , ) return int(stack[0]) if __name__ == "__main__": A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """) print("""\n\tResult = """, solve(Postfix))
49
1
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() A__ = logging.get_logger(__name__) A__ = """Hello, World!""" A__ = """en_XX""" def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool): lowerCamelCase :int = Path('''data_bin''') lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , ) xmod.eval() # disable dropout print(a_) lowerCamelCase :Any = xmod.model.encoder.sentence_encoder lowerCamelCase :List[str] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our X-MOD config:''' , a_) lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_) model.eval() # Now let's copy all the weights. # Embeddings lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight lowerCamelCase :List[str] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them. 
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers): # Encoder: start of layer lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i] lowerCamelCase :List[str] = xmod_sent_encoder.layers[i] # self attention lowerCamelCase :Optional[int] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)) ): raise AssertionError('''Dimensions of self-attention weights do not match.''') lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias # self-attention output lowerCamelCase :Optional[int] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError('''Dimensions of self-attention output weights do not match.''') lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias # intermediate lowerCamelCase :Optional[int] = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of intermediate weights do not match.''') lowerCamelCase :int = xmod_layer.fca.weight lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias # output lowerCamelCase :List[str] = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of feed-forward weights do not match.''') lowerCamelCase :str = xmod_layer.fca.weight lowerCamelCase :int = xmod_layer.fca.bias lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()): raise AssertionError('''Lists of language adapters do not match.''') for lang_code, adapter in xmod_layer.adapter_modules.items(): lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code] lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code] lowerCamelCase :List[Any] = from_adapter.fca.weight lowerCamelCase :List[Any] = from_adapter.fca.bias lowerCamelCase :Dict = from_adapter.fca.weight lowerCamelCase :Optional[Any] = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias if classification_head: lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight lowerCamelCase :List[Any] = 
xmod.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight lowerCamelCase :Any = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1 model.roberta.set_default_language(a_) lowerCamelCase :Any = model(a_)[0] if classification_head: lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_)) else: lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0] print(our_output.shape , their_output.shape) lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item() print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7 lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''') if not success: raise Exception('''Something went wRoNg''') Path(a_).mkdir(parents=a_ , exist_ok=a_) print(F"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(a_) if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) A__ = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler A__ = 16 A__ = 32 def _lowerCamelCase ( a_ : Accelerator , a_ : int = 16 , a_ : str = "bert-base-cased"): lowerCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(a_) lowerCamelCase :Optional[int] = load_dataset('''glue''' , '''mrpc''') def tokenize_function(a_ : Optional[Any]): # max_length=None => use the model max length (it's actually the default) lowerCamelCase :Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=a_ , max_length=a_) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCamelCase :List[str] = datasets.map( a_ , batched=a_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=a_) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase :str = tokenized_datasets.rename_column('''label''' , '''labels''') def collate_fn(a_ : str): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(a_ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''') return tokenizer.pad(a_ , padding='''longest''' , return_tensors='''pt''') # Instantiate dataloaders. lowerCamelCase :Tuple = DataLoader( tokenized_datasets['''train'''] , shuffle=a_ , collate_fn=a_ , batch_size=a_) lowerCamelCase :Any = DataLoader( tokenized_datasets['''validation'''] , shuffle=a_ , collate_fn=a_ , batch_size=a_) return train_dataloader, eval_dataloader def _lowerCamelCase ( a_ : Optional[int] , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Dict): model.eval() lowerCamelCase :Any = 0 for step, batch in enumerate(a_): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): lowerCamelCase :List[str] = model(**a_) lowerCamelCase :Optional[int] = outputs.logits.argmax(dim=-1) # It is slightly faster to call this once, than multiple times lowerCamelCase , lowerCamelCase :Any = accelerator.gather( (predictions, batch['''labels'''])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(a_) - 1: lowerCamelCase :List[str] = predictions[: len(eval_dataloader.dataset) - samples_seen] lowerCamelCase :str = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=a_ , references=a_ , ) lowerCamelCase :Union[str, Any] = metric.compute() return eval_metric["accuracy"] def _lowerCamelCase ( a_ : Tuple , a_ : int): # Initialize accelerator lowerCamelCase :Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase :Tuple = config['''lr'''] lowerCamelCase :Optional[int] = int(config['''num_epochs''']) lowerCamelCase :int = int(config['''seed''']) lowerCamelCase :int = int(config['''batch_size''']) lowerCamelCase :Union[str, Any] = args.model_name_or_path set_seed(a_) lowerCamelCase , lowerCamelCase :Union[str, Any] = get_dataloaders(a_ , a_ , a_) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase :List[Any] = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_) # Instantiate optimizer lowerCamelCase :Tuple = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCamelCase :int = optimizer_cls(params=model.parameters() , lr=a_) if accelerator.state.deepspeed_plugin is not None: lowerCamelCase :str = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: lowerCamelCase :Tuple = 1 lowerCamelCase :List[Any] = (len(a_) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCamelCase :List[str] = get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , ) else: lowerCamelCase :List[str] = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = accelerator.prepare( a_ , a_ , a_ , a_ , a_) # We need to keep track of how many total steps we have iterated over lowerCamelCase :List[Any] = 0 # We also need to keep track of the stating epoch so files are named properly lowerCamelCase :int = 0 lowerCamelCase :List[Any] = evaluate.load('''glue''' , '''mrpc''') lowerCamelCase :Any = num_epochs if args.partial_train_epoch is not None: lowerCamelCase :int = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint) lowerCamelCase :Tuple = args.resume_from_checkpoint.split('''epoch_''')[1] lowerCamelCase :str = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break lowerCamelCase :Optional[Any] = int(a_) + 1 lowerCamelCase :Optional[int] = evaluation_loop(a_ , a_ , a_ , a_) accelerator.print('''resumed checkpoint performance:''' , a_) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0]) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr''']) with open(os.path.join(args.output_dir , F"state_{starting_epoch-1}.json") , '''r''') as f: lowerCamelCase :Any = json.load(a_) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model lowerCamelCase :Optional[int] = {} for epoch in range(a_ , a_): model.train() for step, batch in enumerate(a_): lowerCamelCase :List[str] = model(**a_) lowerCamelCase :int = outputs.loss lowerCamelCase :int = loss / gradient_accumulation_steps accelerator.backward(a_) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 lowerCamelCase :List[Any] = F"epoch_{epoch}" lowerCamelCase :Tuple = os.path.join(args.output_dir , a_) accelerator.save_state(a_) lowerCamelCase :Tuple = evaluation_loop(a_ , a_ , a_ , a_) lowerCamelCase :int = accuracy lowerCamelCase :Union[str, Any] = lr_scheduler.get_lr()[0] lowerCamelCase :Any = optimizer.param_groups[0]['''lr'''] lowerCamelCase :List[Any] = epoch lowerCamelCase :Optional[Any] = overall_step accelerator.print(F"epoch {epoch}:" , a_) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F"state_{epoch}.json") , '''w''') as f: json.dump(a_ , a_) def _lowerCamelCase ( ): lowerCamelCase :List[str] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''') parser.add_argument( '''--model_name_or_path''' , type=a_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=a_ , ) parser.add_argument( '''--output_dir''' , type=a_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=a_ , default=a_ , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=a_ , default=a_ , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=a_ , default=2 , help='''Number of train epochs.''' , ) lowerCamelCase :str = parser.parse_args() lowerCamelCase :Tuple = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(a_ , a_) if __name__ == "__main__": main()
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
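# Usage sketch (editorial addition; assumes the module lives inside the transformers
# package so the relative imports above resolve):
#
#     config = RobertaPreLayerNormConfig(num_hidden_layers=6)
#     config.model_type         # "roberta-prelayernorm"
#     config.num_hidden_layers  # 6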
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
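# Usage sketch (editorial addition): with this lazy __init__, submodules are only
# imported on first attribute access rather than at package import time, e.g.
#
#     from transformers import ClapModel, ClapProcessor  # resolved via _LazyModule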
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = DebertaTokenizer _UpperCAmelCase = True _UpperCAmelCase = DebertaTokenizerFast def snake_case ( self : List[str] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase :Dict = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''} lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__snake_case ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__snake_case ) ) def snake_case ( self : str , **__snake_case : Dict ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Optional[Any] , __snake_case : int ): lowerCamelCase :List[Any] = '''lower newer''' lowerCamelCase :List[str] = '''lower newer''' return input_text, output_text def snake_case ( self : str ): lowerCamelCase :Optional[int] = self.get_tokenizer() lowerCamelCase :Union[str, Any] = '''lower newer''' lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) lowerCamelCase :List[str] = tokens + [tokenizer.unk_token] lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case ) def snake_case ( self : Optional[int] ): lowerCamelCase :List[str] = self.get_tokenizer() lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' ) lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __snake_case ) @slow def snake_case ( self : str ): lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case ) lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case ) lowerCamelCase :Union[str, Any] = tokenizer.encode( '''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case ) lowerCamelCase :str = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case ) lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case ) lowerCamelCase :Dict = 
tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def snake_case ( self : str ): lowerCamelCase :List[str] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase :Tuple = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case ) lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']] # fmt: off lowerCamelCase :Any = { '''input_ids''': [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on lowerCamelCase :Optional[int] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __snake_case ) for expected, decoded in zip(__snake_case , __snake_case ): self.assertEqual(__snake_case , __snake_case )
def optimal_merge_pattern(files: list) -> float:
    """Find the optimal (minimum) cost to merge all the given files together."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
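# Worked example appended to the module above (an editorial addition, not in the
# original). Merging [2, 3, 4]: the two cheapest files merge first (2 + 3 = 5, cost 5),
# then 5 + 4 = 9 (cost 9), for a total cost of 14.
if __name__ == "__main__":
    assert optimal_merge_pattern([2, 3, 4]) == 14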
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) A__ = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ): lowerCamelCase :Tuple = None lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) lowerCamelCase :Optional[int] = os.path.abspath('''examples''' ) for item in os.listdir(__snake_case ): if item not in EXCLUDE_EXAMPLES: lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ) and ".py" in item_path: with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ): lowerCamelCase :Union[str, Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case ) lowerCamelCase :int = '''\n'''.join(__snake_case ) if special_strings is not None: for string in special_strings: lowerCamelCase :int = diff.replace(__snake_case , '''''' ) self.assertEqual(__snake_case , '''''' ) def snake_case ( self : Dict ): self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) lowerCamelCase :Optional[int] = [ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = False @classmethod def snake_case ( cls : Optional[Any] ): super().setUpClass() lowerCamelCase :Any = tempfile.mkdtemp() lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case ( cls : Dict ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case ( self : int ): lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n 
--checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def snake_case ( self : List[Any] ): lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() lowerCamelCase :List[Any] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def snake_case ( self : List[str] ): lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split() lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) def snake_case ( self : str ): lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split() lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case ) if torch.cuda.is_available(): lowerCamelCase :Union[str, Any] = torch.cuda.device_count() else: lowerCamelCase :Dict = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) else: self.assertIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) @slow def snake_case ( self : Any ): lowerCamelCase :Tuple = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case ) lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1] lowerCamelCase :List[str] = ast.literal_eval(__snake_case ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def snake_case ( self : int ): lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Any ): with tempfile.TemporaryDirectory() as tmpdir: lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) ) def snake_case ( self : Tuple ): lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def snake_case ( self : Optional[Any] ): lowerCamelCase :int = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        # (AddedToken flag values below are an assumption reconstructed from the usual
        # ALBERT fast-tokenizer settings.)
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = MobileBertTokenizer _UpperCAmelCase = MobileBertTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = filter_non_english _UpperCAmelCase = 'google/mobilebert-uncased' def snake_case ( self : int ): super().setUp() lowerCamelCase :Tuple = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCamelCase :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) lowerCamelCase :List[str] = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def snake_case ( self : str , __snake_case : List[Any] ): lowerCamelCase :Dict = '''UNwant\u00E9d,running''' lowerCamelCase :Any = '''unwanted, running''' return input_text, output_text def snake_case ( self : List[str] ): lowerCamelCase :int = self.tokenizer_class(self.vocab_file ) lowerCamelCase :Any = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__snake_case , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [9, 6, 7, 12, 10, 11] ) def snake_case ( self : str ): if not self.test_rust_tokenizer: return lowerCamelCase :List[Any] = self.get_tokenizer() lowerCamelCase :Union[str, Any] = self.get_rust_tokenizer() lowerCamelCase :Union[str, Any] = '''UNwant\u00E9d,running''' lowerCamelCase :List[Any] = tokenizer.tokenize(__snake_case ) lowerCamelCase :str = rust_tokenizer.tokenize(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) lowerCamelCase :str = tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) lowerCamelCase :Any = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) self.assertListEqual(__snake_case , __snake_case ) lowerCamelCase :int = self.get_rust_tokenizer() lowerCamelCase :List[Any] = tokenizer.encode(__snake_case ) lowerCamelCase :Optional[int] = rust_tokenizer.encode(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) # With lower casing lowerCamelCase :Tuple = self.get_tokenizer(do_lower_case=__snake_case ) lowerCamelCase :str = self.get_rust_tokenizer(do_lower_case=__snake_case ) lowerCamelCase :Optional[Any] = '''UNwant\u00E9d,running''' lowerCamelCase :int = tokenizer.tokenize(__snake_case ) lowerCamelCase :Optional[Any] = rust_tokenizer.tokenize(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) lowerCamelCase :Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) lowerCamelCase :Tuple = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) self.assertListEqual(__snake_case , __snake_case ) lowerCamelCase :Dict = self.get_rust_tokenizer() lowerCamelCase :Tuple = 
tokenizer.encode(__snake_case ) lowerCamelCase :Optional[Any] = rust_tokenizer.encode(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) def snake_case ( self : Optional[int] ): lowerCamelCase :Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def snake_case ( self : Dict ): lowerCamelCase :Optional[int] = BasicTokenizer(do_lower_case=__snake_case ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def snake_case ( self : List[str] ): lowerCamelCase :Optional[Any] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def snake_case ( self : str ): lowerCamelCase :Optional[int] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def snake_case ( self : str ): lowerCamelCase :Tuple = BasicTokenizer(do_lower_case=__snake_case ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def snake_case ( self : int ): lowerCamelCase :Union[str, Any] = BasicTokenizer(do_lower_case=__snake_case ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def snake_case ( self : Optional[Any] ): lowerCamelCase :List[Any] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def snake_case ( self : List[Any] ): lowerCamelCase :Optional[Any] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def snake_case ( self : Dict ): lowerCamelCase :Tuple = BasicTokenizer(do_lower_case=__snake_case , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Any = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] lowerCamelCase :Optional[Any] = {} for i, token in enumerate(__snake_case ): lowerCamelCase :List[Any] = i lowerCamelCase :int = WordpieceTokenizer(vocab=__snake_case , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def snake_case ( self : str ): self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def snake_case ( self : Optional[int] ): self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def snake_case ( self : List[Any] ): self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Optional[Any] = self.get_tokenizer() lowerCamelCase :str = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__snake_case ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(__snake_case ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def snake_case ( self : int ): lowerCamelCase :str = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) lowerCamelCase :Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case ) lowerCamelCase :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case ) lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case ) lowerCamelCase :Tuple = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def snake_case ( self : Dict ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCamelCase :int = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :Union[str, Any] = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
lowerCamelCase :Optional[int] = tokenizer_r.encode_plus( __snake_case , return_attention_mask=__snake_case , return_token_type_ids=__snake_case , return_offsets_mapping=__snake_case , add_special_tokens=__snake_case , ) lowerCamelCase :Tuple = tokenizer_r.do_lower_case if hasattr(__snake_case , '''do_lower_case''' ) else False lowerCamelCase :Union[str, Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def snake_case ( self : Any ): lowerCamelCase :Optional[int] = ['''的''', '''人''', '''有'''] lowerCamelCase :List[Any] = ''''''.join(__snake_case ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCamelCase :Optional[Any] = True lowerCamelCase :List[str] = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :List[str] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :Union[str, Any] = tokenizer_p.encode(__snake_case , add_special_tokens=__snake_case ) lowerCamelCase :Union[str, Any] = tokenizer_r.encode(__snake_case , add_special_tokens=__snake_case ) lowerCamelCase :List[str] = tokenizer_r.convert_ids_to_tokens(__snake_case ) lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(__snake_case ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__snake_case , __snake_case ) self.assertListEqual(__snake_case , __snake_case ) lowerCamelCase :Optional[Any] = False lowerCamelCase :Tuple = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :str = tokenizer_r.encode(__snake_case , add_special_tokens=__snake_case ) lowerCamelCase :Union[str, Any] = tokenizer_p.encode(__snake_case , add_special_tokens=__snake_case ) lowerCamelCase :Dict = tokenizer_r.convert_ids_to_tokens(__snake_case ) lowerCamelCase :Any = tokenizer_p.convert_ids_to_tokens(__snake_case ) # it is expected that only the first Chinese character is not preceded by "##". lowerCamelCase :List[str] = [ F"##{token}" if idx != 0 else token for idx, token in enumerate(__snake_case ) ] self.assertListEqual(__snake_case , __snake_case ) self.assertListEqual(__snake_case , __snake_case )
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
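# Note (editorial, hedged): this is the classic "largest base/exponent" search in the
# style of Project Euler 99. Each line of base_exp.txt reads "base,exponent", and
# comparing x * log10(a) instead of a**x avoids computing astronomically large powers.
# For example, 2**11 vs 3**7: 11 * log10(2) ≈ 3.31 < 7 * log10(3) ≈ 3.34, so
# 3**7 (= 2187) beats 2**11 (= 2048).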
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # The mutated index below is an assumption reconstructed from the usual form of
        # this algorithm: replace one random gene of the child.
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
49
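# Clean sketch of the three operators driving the evolution loop above.
# Hedged reconstruction: the original identifiers are obfuscated, so these
# names are illustrative, and TARGET/GENES are toy stand-ins.
import random

TARGET = "HELLO WORLD"
GENES = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ ")


def evaluate(item: str) -> tuple[str, float]:
    # Fitness = number of positions matching the target.
    return item, float(sum(g == TARGET[i] for i, g in enumerate(item)))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Single random cut point; swap the tails of the two parents.
    cut = random.randint(0, len(parent_1) - 1)
    return parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:]


def mutate(child: str, mutation_probability: float = 0.4) -> str:
    # With some probability, replace one random gene.
    genes = list(child)
    if random.uniform(0, 1) < mutation_probability:
        genes[random.randint(0, len(genes) - 1)] = random.choice(GENES)
    return "".join(genes)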
def min_path_sum(grid: list) -> int:
    """Minimum-cost path from top-left to bottom-right, moving only right or
    down; the grid is relaxed in place, one row at a time. (Identifiers
    reconstructed; the originals were obfuscated.)"""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
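# Quick usage check for the grid DP above (the function name follows the
# reconstruction, since the original identifier was obfuscated):
grid = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
print(min_path_sum(grid))  # 7, along the path 1 -> 3 -> 1 -> 1 -> 1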
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first (greedy choice)
    for denomination in reversed(denominations):
        # Take this denomination while it still fits
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
49
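# Caveat worth noting next to the greedy routine above: it is optimal for
# canonical coin systems like [1, 2, 5, 10, ...] but not in general. A quick
# counterexample (function name follows the driver code above):
print(find_minimum_change([1, 3, 4], "6"))  # greedy picks [4, 1, 1] -> 3 coins
# The optimum is [3, 3] -> 2 coins, so for arbitrary denominations a dynamic
# programming formulation is required instead.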
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Grow the number spiral layer by layer until the proportion of primes
    along its diagonals drops below the given ratio, then return the side
    length. (Identifiers reconstructed; the originals were obfuscated.)"""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
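# Sanity check for the corner arithmetic in the solution above: the spiral
# layer with side length j + 2 contributes diagonal entries at steps of
# j + 1, starting one step past j * j.
j = 3
checked = list(range(j * j + j + 1, (j + 2) * (j + 2), j + 1))
print(checked)  # [13, 17, 21] -- the 5x5 ring's corners except 25 = 5**2,
# which is an odd perfect square and therefore never prime, so it is skipped.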
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) A__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
49
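# Tiny sketch of the lazy-import pattern used by the module file above (an
# illustrative reduction, not the real transformers._LazyModule): attribute
# access, rather than package import, is what triggers loading the heavy
# torch-backed submodules.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute back to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)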
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : str ): lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :Optional[Any] = -1 lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :str = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase :str = TextStreamer(__snake_case ) model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase :Optional[int] = cs.out[:-1] self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : Dict ): lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :List[Any] = -1 lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] ) lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case ) lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case ) thread.start() lowerCamelCase :Any = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : str ): lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :List[str] = -1 lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case ) lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :] lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case ) model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCamelCase :int = cs.out[:-1] self.assertEqual(__snake_case , __snake_case ) def snake_case ( self : Optional[int] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case ) lowerCamelCase :Optional[int] = -1 lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case ) model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n" lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def snake_case ( self : List[Any] ): lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case ) lowerCamelCase :Optional[int] = -1 lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case ) lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 ) lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__snake_case ): lowerCamelCase :Dict = '''''' for new_text in streamer: streamer_text += new_text
49
1
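# The usage pattern those tests exercise, outside a test harness: run
# generation on a background thread and consume decoded text as it arrives
# (assumes the tiny test checkpoint above is downloadable).
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
Thread(
    target=model.generate,
    kwargs={**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer},
).start()
generated = "".join(chunk for chunk in streamer)  # iteration ends with generation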
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class _lowerCAmelCase ( unittest.TestCase ): def __init__( self : Any , __snake_case : List[Any] , __snake_case : List[Any]=7 , __snake_case : Union[str, Any]=3 , __snake_case : Dict=30 , __snake_case : Optional[Any]=400 , __snake_case : List[Any]=True , __snake_case : Optional[Any]=None , __snake_case : int=0.9 , __snake_case : Tuple=None , __snake_case : Dict=True , __snake_case : Optional[int]=[0.5, 0.5, 0.5] , __snake_case : Tuple=[0.5, 0.5, 0.5] , ): lowerCamelCase :List[str] = size if size is not None else {'''shortest_edge''': 30} lowerCamelCase :Tuple = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30} lowerCamelCase :Optional[int] = parent lowerCamelCase :List[Any] = batch_size lowerCamelCase :int = num_channels lowerCamelCase :int = min_resolution lowerCamelCase :int = max_resolution lowerCamelCase :int = do_resize_and_center_crop lowerCamelCase :List[Any] = size lowerCamelCase :Union[str, Any] = crop_pct lowerCamelCase :int = crop_size lowerCamelCase :int = do_normalize lowerCamelCase :List[Any] = image_mean lowerCamelCase :Optional[int] = image_std def snake_case ( self : str ): return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def snake_case ( self : List[Any] ): lowerCamelCase :Optional[int] = PoolFormerImageProcessingTester(self ) @property def snake_case ( self : str ): return self.image_processor_tester.prepare_image_processor_dict() def snake_case ( self : List[str] ): lowerCamelCase :Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__snake_case , '''do_resize_and_center_crop''' ) ) self.assertTrue(hasattr(__snake_case , '''size''' ) ) self.assertTrue(hasattr(__snake_case , '''crop_pct''' ) ) self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) ) self.assertTrue(hasattr(__snake_case , '''image_mean''' ) ) self.assertTrue(hasattr(__snake_case , '''image_std''' ) ) def snake_case ( self : List[str] ): lowerCamelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 30} ) self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} ) lowerCamelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def snake_case ( self : List[Any] ): pass def snake_case ( self : Optional[int] ): # Initialize image_processing lowerCamelCase :Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase :str = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , Image.Image ) # Test not batched input lowerCamelCase :Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCamelCase :Tuple = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case ( self : Tuple ): # Initialize image_processing lowerCamelCase :Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase :int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , np.ndarray ) # Test not batched input lowerCamelCase :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCamelCase :Optional[Any] = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case ( self : int ): # Initialize image_processing lowerCamelCase :int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , torch.Tensor ) # Test not batched input lowerCamelCase :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCamelCase :Tuple = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
49
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Liouville's lambda: 1 if the number of prime factors counted with
    multiplicity is even, -1 if it is odd. (Function name reconstructed;
    the original identifier was obfuscated.)"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
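# Equivalent formulation of the function above without the maths package
# dependency: Liouville's lambda is (-1) ** Omega(n), where Omega counts
# prime factors with multiplicity.
def omega(n: int) -> int:
    count, p = 0, 2
    while p * p <= n:
        while n % p == 0:
            n //= p
            count += 1
        p += 1
    return count + (n > 1)


print([(-1) ** omega(n) for n in range(1, 11)])  # [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]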
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A__ = logging.get_logger(__name__) A__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} A__ = { """tokenizer_file""": { """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""", }, } A__ = { """gpt-neox-20b""": 2_048, } class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ['input_ids', 'attention_mask'] def __init__( self : int , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Tuple=None , __snake_case : str="<|endoftext|>" , __snake_case : Dict="<|endoftext|>" , __snake_case : Optional[int]="<|endoftext|>" , __snake_case : Any=False , **__snake_case : Optional[int] , ): super().__init__( __snake_case , __snake_case , tokenizer_file=__snake_case , unk_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , ) lowerCamelCase :List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __snake_case ) != add_prefix_space: lowerCamelCase :int = getattr(__snake_case , pre_tok_state.pop('''type''' ) ) lowerCamelCase :str = add_prefix_space lowerCamelCase :str = pre_tok_class(**__snake_case ) lowerCamelCase :Optional[int] = add_prefix_space def snake_case ( self : List[str] , __snake_case : str , __snake_case : Optional[str] = None ): lowerCamelCase :Any = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case ) def snake_case ( self : Tuple , __snake_case : "Conversation" ): lowerCamelCase :Any = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__snake_case , add_special_tokens=__snake_case ) + [self.eos_token_id] ) if len(__snake_case ) > self.model_max_length: lowerCamelCase :Optional[Any] = input_ids[-self.model_max_length :] return input_ids
49
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() A__ = logging.get_logger(__name__) def _lowerCamelCase ( a_ : str , a_ : str=False): lowerCamelCase :Optional[int] = [] # fmt: off # stem: rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''')) rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''')) rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''')) rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''')) # backbone rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''')) rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''')) rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''')) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", 
F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight")) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias")) # transformer encoder for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias")) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ]) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ]) # fmt: on return rename_keys def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False): for i in range(config.num_hidden_layers): if base_model: lowerCamelCase :Union[str, Any] = '''''' else: lowerCamelCase :Optional[int] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight") lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict lowerCamelCase :Any = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size] lowerCamelCase :int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase :Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase :Optional[Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :] def _lowerCamelCase ( a_ : int): lowerCamelCase :Any = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a_ , a_) def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple): lowerCamelCase 
:Optional[Any] = dct.pop(a_) lowerCamelCase :str = val def _lowerCamelCase ( ): lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw) return im @torch.no_grad() def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False): lowerCamelCase :Optional[int] = BitConfig( global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , ) lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00) lowerCamelCase :List[Any] = False # load original model from timm lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCamelCase :List[str] = timm_model.state_dict() if base_model: remove_classification_head_(a_) lowerCamelCase :Tuple = create_rename_keys(a_ , a_) for src, dest in rename_keys: rename_key(a_ , a_ , a_) read_in_q_k_v(a_ , a_ , a_) lowerCamelCase :List[str] = '''huggingface/label-files''' lowerCamelCase :Any = '''imagenet-1k-id2label.json''' lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r''')) lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()} lowerCamelCase :Optional[int] = idalabel lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval() else: lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval() model.load_state_dict(a_) # create image processor lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_)) lowerCamelCase :str = transform.transforms lowerCamelCase :int = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } lowerCamelCase :Any = ViTHybridImageProcessor( do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowerCamelCase :Dict = prepare_img() lowerCamelCase :str = transform(a_).unsqueeze(0) lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values # verify pixel values assert torch.allclose(a_ , a_) # verify logits with torch.no_grad(): lowerCamelCase :Optional[int] = model(a_) lowerCamelCase :Union[str, Any] = outputs.logits print('''Predicted class:''' , logits.argmax(-1).item()) if base_model: lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3) else: lowerCamelCase :List[str] = timm_model(a_) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(a_ , outputs.logits , atol=1e-3) print('''Looks ok!''') if pytorch_dump_folder_path is not None: Path(a_).mkdir(exist_ok=a_) print(F"Saving model {vit_name} to {pytorch_dump_folder_path}") model.save_pretrained(a_) print(F"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(a_) if push_to_hub: print(F"Pushing model and processor to the hub {vit_name}") 
model.push_to_hub(F"ybelkada/{vit_name}") processor.push_to_hub(F"ybelkada/{vit_name}") if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) A__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
49
1
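import torch

# Minimal illustration of the qkv split performed in read_in_q_k_v above:
# timm stores one fused (3 * hidden, hidden) projection, which the conversion
# slices into separate query/key/value weights (hypothetical hidden size 4).
hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
in_proj_weight = in_proj_weight.reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)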
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() A__ = logging.get_logger(__name__) A__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } A__ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCamelCase ( a_ : List[str] , a_ : List[str] , a_ : str , a_ : Union[str, Any] , a_ : Optional[Any]): for attribute in key.split('''.'''): lowerCamelCase :Optional[Any] = getattr(a_ , a_) if weight_type is not None: lowerCamelCase :Optional[int] = getattr(a_ , a_).shape else: lowerCamelCase :int = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": lowerCamelCase :Any = value elif weight_type == "weight_g": lowerCamelCase :str = value elif weight_type == "weight_v": lowerCamelCase :str = value elif weight_type == "bias": lowerCamelCase :Dict = value else: lowerCamelCase :List[str] = value logger.info(F"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.") def _lowerCamelCase ( a_ : Optional[Any] , a_ : int): lowerCamelCase :List[str] = [] lowerCamelCase :Tuple = fairseq_model.state_dict() lowerCamelCase :Optional[int] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight lowerCamelCase :Union[str, Any] = None for name, value in fairseq_dict.items(): lowerCamelCase :List[str] = False if "conv_layers" in name: load_conv_layer( a_ , a_ , a_ , a_ , hf_model.config.feat_extract_norm == '''group''' , ) lowerCamelCase :Optional[int] = True elif name.split('''.''')[0] == "proj": lowerCamelCase :Any = fairseq_model.proj lowerCamelCase :int = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]: lowerCamelCase :Tuple = True if "*" in mapped_key: lowerCamelCase :str = name.split(a_)[0].split('''.''')[-2] lowerCamelCase :int = mapped_key.replace('''*''' , a_) if "weight_g" in name: lowerCamelCase :Optional[int] = '''weight_g''' elif "weight_v" in name: lowerCamelCase :Union[str, Any] = '''weight_v''' elif "bias" in name: lowerCamelCase :Optional[Any] = '''bias''' elif "weight" in name: lowerCamelCase :str = '''weight''' else: lowerCamelCase :List[str] = None set_recursively(a_ , a_ , a_ , a_ , a_) continue if not is_used: unused_weights.append(a_) logger.warning(F"Unused weights: {unused_weights}") return proj_weight def _lowerCamelCase ( a_ : List[Any] , a_ : int , a_ : Optional[int] , a_ : int , a_ : Tuple): lowerCamelCase :Any = full_name.split('''conv_layers.''')[-1] lowerCamelCase :Union[str, Any] = name.split('''.''') lowerCamelCase :Dict = int(items[0]) lowerCamelCase :Optional[int] = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) lowerCamelCase :List[str] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) lowerCamelCase :str = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) lowerCamelCase :Tuple = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) lowerCamelCase :Dict = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(a_) def _lowerCamelCase ( a_ : Tuple): lowerCamelCase , lowerCamelCase :List[str] = emb.weight.shape lowerCamelCase :int = nn.Linear(a_ , a_ , bias=a_) lowerCamelCase :Any = emb.weight.data return lin_layer def _lowerCamelCase ( a_ : Union[str, Any]): with open(a_ , '''r''' , encoding='''utf-8''') as f: lowerCamelCase :Union[str, Any] = f.readlines() lowerCamelCase :Union[str, Any] = [line.split(''' ''')[0] for line in lines] lowerCamelCase :Tuple = len(a_) lowerCamelCase :Optional[Any] = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(a_ , range(4 , num_words + 4)))) return vocab_dict @torch.no_grad() def _lowerCamelCase ( a_ : List[Any] , a_ : Dict , a_ : Optional[int] , a_ : str , a_ : Tuple , a_ : Any , a_ : List[str] , ): lowerCamelCase :Any = WavaVecaConfig.from_pretrained(a_) lowerCamelCase :List[str] = SpeechaTextaConfig.from_pretrained( a_ , vocab_size=a_ , decoder_layers=a_ , do_stable_layer_norm=a_) lowerCamelCase :Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a_ , return_attention_mask=a_ , ) lowerCamelCase , lowerCamelCase , lowerCamelCase :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}) lowerCamelCase :Tuple = model[0].eval() # set weights for wav2vec2 encoder lowerCamelCase :Dict = WavaVecaModel(a_) lowerCamelCase :List[Any] = recursively_load_weights_wavaveca(model.encoder , a_) lowerCamelCase :int = SpeechaTextaForCausalLM(a_) lowerCamelCase , lowerCamelCase :Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a_) # set output linear layer unexpected_keys.remove('''embed_out''') lowerCamelCase :List[str] = nn.Parameter(model.decoder.embed_out.detach()) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}") logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}") lowerCamelCase :Optional[Any] = SpeechEncoderDecoderModel(encoder=a_ , decoder=a_) lowerCamelCase :Optional[int] = False # add projection layer lowerCamelCase :Any = nn.Parameter(projection_layer.weight) lowerCamelCase :str = nn.Parameter(projection_layer.bias) lowerCamelCase :Any = create_vocab_dict(a_) with open(os.path.join(a_ , '''vocab.json''') , '''w''') as fp: json.dump(a_ , a_) lowerCamelCase :List[Any] = SpeechaTextaTokenizer(os.path.join(a_ , '''vocab.json''')) tokenizer.save_pretrained(a_) lowerCamelCase :int = hf_wavavec.config.to_dict() lowerCamelCase :Tuple = tokenizer.pad_token_id lowerCamelCase :Tuple = tokenizer.bos_token_id lowerCamelCase :str = tokenizer.eos_token_id lowerCamelCase :str = '''speech_to_text_2''' lowerCamelCase :Any = '''wav2vec2''' lowerCamelCase :Optional[int] = SpeechEncoderDecoderConfig.from_dict(a_) hf_wavavec.save_pretrained(a_) feature_extractor.save_pretrained(a_) if __name__ == "__main__": A__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, 
type=str, help="""Path to dict of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-large-lv60""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/s2t-small-mustc-en-fr-st""", type=str, help="""Path to hf decoder s2t checkpoint config""", ) parser.add_argument("""--vocab_size""", default=10_224, type=int, help="""Vocab size of decoder""") parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""") A__ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
49
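import torch
from torch import nn

# Stand-alone version of the embedding-to-linear helper used by the
# conversion above: the decoder's output projection shares (ties) its weight
# with the token embedding matrix.
def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


emb = nn.Embedding(10, 4)
lin = make_linear_from_emb(emb)
assert lin(torch.randn(2, 4)).shape == (2, 10)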
def solution(n: int = 4_000_000) -> int:
    """Sum of the even-valued Fibonacci terms not exceeding n.
    (Identifiers reconstructed; the originals were obfuscated.)"""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
49
1
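# Space-saving alternative to the list-based solution above: the even
# Fibonacci numbers satisfy E(k) = 4 * E(k - 1) + E(k - 2), so only two
# values ever need to be carried.
def even_fib_sum(limit: int = 4_000_000) -> int:
    a, b, total = 2, 8, 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total


print(even_fib_sum())  # 4613732, matching the solution above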
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (A, B, ..., Z, AA, AB, ...) into
    its 1-based column number. (Function name reconstructed; the original
    identifier was obfuscated.)"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
49
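# Inverse mapping, handy for round-trip checks of the function above; column
# titles are bijective base-26 numerals, hence the "- 1" before each divmod.
def column_to_title(number: int) -> str:
    title = ""
    while number > 0:
        number, remainder = divmod(number - 1, 26)
        title = chr(65 + remainder) + title
    return title


print(column_to_title(28))  # "AB"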
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
49
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Any ): lowerCamelCase :List[str] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) lowerCamelCase :Any = get_activation('''gelu''' ) self.assertTrue(torch.allclose(gelu_python(__snake_case ) , torch_builtin(__snake_case ) ) ) self.assertFalse(torch.allclose(gelu_python(__snake_case ) , gelu_new(__snake_case ) ) ) def snake_case ( self : List[Any] ): lowerCamelCase :int = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) lowerCamelCase :List[Any] = get_activation('''gelu''' ) lowerCamelCase :Any = get_activation('''gelu_10''' ) lowerCamelCase :int = torch_builtin(__snake_case ) lowerCamelCase :Any = geluaa(__snake_case ) lowerCamelCase :Dict = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(__snake_case ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def snake_case ( self : Optional[int] ): get_activation('''gelu''' ) get_activation('''gelu_10''' ) get_activation('''gelu_fast''' ) get_activation('''gelu_new''' ) get_activation('''gelu_python''' ) get_activation('''gelu_pytorch_tanh''' ) get_activation('''linear''' ) get_activation('''mish''' ) get_activation('''quick_gelu''' ) get_activation('''relu''' ) get_activation('''sigmoid''' ) get_activation('''silu''' ) get_activation('''swish''' ) get_activation('''tanh''' ) with self.assertRaises(__snake_case ): get_activation('''bogus''' ) with self.assertRaises(__snake_case ): get_activation(__snake_case ) def snake_case ( self : List[Any] ): lowerCamelCase :List[Any] = get_activation('''gelu''' ) lowerCamelCase :Optional[Any] = 1 lowerCamelCase :List[str] = get_activation('''gelu''' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__snake_case ): lowerCamelCase :Dict = acta.a
49
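# What the gelu_10 assertions above verify, shown directly: the clipped
# variant matches plain gelu below the clamp and saturates at 10 above it
# (assumes a transformers install).
import torch
from transformers.activations import get_activation

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
gelu = get_activation("gelu")
gelu_10 = get_activation("gelu_10")
print(gelu(x))     # last entry is ~100.0
print(gelu_10(x))  # identical except the last entry, clamped to 10.0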
import numpy class _lowerCAmelCase : def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ): lowerCamelCase :Dict = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. lowerCamelCase :Dict = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. lowerCamelCase :Dict = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. lowerCamelCase :Any = numpy.random.rand(3 , 1 ) # Real output values provided. lowerCamelCase :Union[str, Any] = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. lowerCamelCase :List[str] = numpy.zeros(output_array.shape ) def snake_case ( self : Optional[int] ): lowerCamelCase :Any = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. lowerCamelCase :Any = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. lowerCamelCase :Dict = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def snake_case ( self : Any ): lowerCamelCase :Union[str, Any] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) lowerCamelCase :Dict = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) lowerCamelCase :int = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ): for iteration in range(1 , iterations + 1 ): lowerCamelCase :Union[str, Any] = self.feedforward() self.back_propagation() if give_loss: 
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F"Iteration {iteration} Loss: {loss}" ) def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ): lowerCamelCase :int = input_arr lowerCamelCase :Union[str, Any] = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) lowerCamelCase :Optional[Any] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) lowerCamelCase :Optional[int] = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _lowerCamelCase ( a_ : numpy.ndarray): return 1 / (1 + numpy.exp(-value)) def _lowerCamelCase ( a_ : numpy.ndarray): return (value) * (1 - (value)) def _lowerCamelCase ( ): lowerCamelCase :Optional[Any] = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa) # Calling neural network class. lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork( input_array=a_ , output_array=a_) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=a_ , iterations=10 , give_loss=a_) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa)) if __name__ == "__main__": example()
49
1
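import numpy

# Shape walk-through of the 3-4-3-1 network defined above, with a
# hypothetical batch of 8 samples; each layer computes sigmoid(prev @ weights).
x = numpy.zeros((8, 3))
w1 = numpy.random.rand(3, 4)  # input -> first hidden layer
w2 = numpy.random.rand(4, 3)  # first -> second hidden layer
w3 = numpy.random.rand(3, 1)  # second hidden layer -> output
h1 = 1 / (1 + numpy.exp(-(x @ w1)))
h2 = 1 / (1 + numpy.exp(-(h1 @ w2)))
out = 1 / (1 + numpy.exp(-(h2 @ w3)))
print(h1.shape, h2.shape, out.shape)  # (8, 4) (8, 3) (8, 1)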
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase A__ = logging.get_logger(__name__) A__ = { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""", """allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""", """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json""" ), } class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'longformer' def __init__( self : Union[str, Any] , __snake_case : Union[List[int], int] = 512 , __snake_case : int = 2 , __snake_case : int = 1 , __snake_case : int = 0 , __snake_case : int = 2 , __snake_case : int = 30522 , __snake_case : int = 768 , __snake_case : int = 12 , __snake_case : int = 12 , __snake_case : int = 3072 , __snake_case : str = "gelu" , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : int = 512 , __snake_case : int = 2 , __snake_case : float = 0.0_2 , __snake_case : float = 1e-1_2 , __snake_case : bool = False , **__snake_case : int , ): super().__init__(pad_token_id=__snake_case , **__snake_case ) lowerCamelCase :Dict = attention_window lowerCamelCase :int = sep_token_id lowerCamelCase :int = bos_token_id lowerCamelCase :Any = eos_token_id lowerCamelCase :List[Any] = vocab_size lowerCamelCase :Optional[int] = hidden_size lowerCamelCase :int = num_hidden_layers lowerCamelCase :List[Any] = num_attention_heads lowerCamelCase :int = hidden_act lowerCamelCase :int = intermediate_size lowerCamelCase :List[str] = hidden_dropout_prob lowerCamelCase :Dict = attention_probs_dropout_prob lowerCamelCase :Optional[int] = max_position_embeddings lowerCamelCase :Optional[Any] = type_vocab_size lowerCamelCase :Union[str, Any] = initializer_range lowerCamelCase :Optional[int] = layer_norm_eps lowerCamelCase :Tuple = onnx_export class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : List[Any] , __snake_case : "PretrainedConfig" , __snake_case : str = "default" , __snake_case : "List[PatchingSpec]" = None ): super().__init__(__snake_case , __snake_case , __snake_case ) lowerCamelCase :Optional[Any] = True @property def snake_case ( self : Optional[int] ): if self.task == "multiple-choice": lowerCamelCase :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''global_attention_mask''', dynamic_axis), ] ) @property def snake_case ( self : Union[str, Any] ): lowerCamelCase :Dict = super().outputs if self.task == "default": lowerCamelCase :Optional[Any] = {0: '''batch'''} return outputs @property def snake_case ( self : List[Any] ): return 1e-4 @property def snake_case ( self : Union[str, Any] 
): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 14 ) def snake_case ( self : Dict , __snake_case : "PreTrainedTokenizerBase" , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ): lowerCamelCase :Tuple = super().generate_dummy_inputs( preprocessor=__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly lowerCamelCase :int = torch.zeros_like(inputs['''input_ids'''] ) # make every second token global lowerCamelCase :Dict = 1 return inputs
49
def abbr(a: str, b: str) -> bool:
    """Return True if b can be obtained from a by capitalizing some lowercase
    letters of a and deleting the remaining lowercase letters. (Identifiers
    reconstructed; the originals were obfuscated.)"""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
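# Example runs of the DP above (this is the classic "abbreviation" problem:
# capitalize some lowercase letters of a, delete the rest, and match b).
print(abbr("daBcd", "ABC"))  # True: capitalize a and c, drop both d's
print(abbr("dBcd", "ABC"))   # False: no 'a' exists to supply the leading A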
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A__ = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 49
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ): lowerCamelCase :Optional[Any] = parent lowerCamelCase :List[Any] = batch_size lowerCamelCase :Any = image_size lowerCamelCase :Union[str, Any] = patch_size lowerCamelCase :Any = num_channels lowerCamelCase :List[Any] = is_training lowerCamelCase :Optional[Any] = use_labels lowerCamelCase :Any = hidden_size lowerCamelCase :List[Any] = num_hidden_layers lowerCamelCase :List[str] = num_attention_heads lowerCamelCase :Tuple = intermediate_size lowerCamelCase :List[str] = hidden_act lowerCamelCase :List[str] = hidden_dropout_prob lowerCamelCase :Any = attention_probs_dropout_prob lowerCamelCase :List[Any] = type_sequence_label_size lowerCamelCase :Optional[int] = initializer_range lowerCamelCase :List[Any] = num_labels lowerCamelCase :Any = scope lowerCamelCase :Union[str, Any] = n_targets lowerCamelCase :Optional[Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens def snake_case ( self : List[str] ): lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowerCamelCase :List[str] = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowerCamelCase :Optional[int] = [] for i in range(self.batch_size ): lowerCamelCase :List[str] = {} lowerCamelCase :Tuple = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=__snake_case ) lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case ) labels.append(__snake_case ) lowerCamelCase :str = self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] ): return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ): lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :Union[str, Any] = model(__snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ): lowerCamelCase :int = YolosForObjectDetection(__snake_case ) model.to(__snake_case ) model.eval() lowerCamelCase :str = model(pixel_values=__snake_case ) lowerCamelCase :Any = model(__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def snake_case ( self : int ): lowerCamelCase :List[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () _UpperCAmelCase = ( {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ): lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowerCamelCase :Dict = [] for i in range(self.model_tester.batch_size ): lowerCamelCase :Optional[Any] = {} lowerCamelCase :List[Any] = torch.ones( size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long ) lowerCamelCase :str = torch.ones( self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float ) labels.append(__snake_case ) lowerCamelCase :Union[str, Any] = labels return inputs_dict def snake_case ( self : Tuple ): lowerCamelCase :Union[str, Any] = YolosModelTester(self ) lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 ) def snake_case ( self : Union[str, Any] ): self.config_tester.run_common_tests() def snake_case ( self : Optional[Any] ): # YOLOS does not use inputs_embeds pass def snake_case ( self : Tuple ): lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :Optional[int] = model_class(__snake_case ) 
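# common check: input embeddings must be an nn.Module, and output embeddings, when present, an nn.Linear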
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase :str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) ) def snake_case ( self : str ): lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :str = model_class(__snake_case ) lowerCamelCase :Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase :Tuple = [*signature.parameters.keys()] lowerCamelCase :Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __snake_case ) def snake_case ( self : int ): lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def snake_case ( self : str ): lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase :int = True # in YOLOS, the seq_len is different lowerCamelCase :str = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowerCamelCase :str = True lowerCamelCase :Tuple = False lowerCamelCase :Optional[int] = True lowerCamelCase :int = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :str = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase :Optional[Any] = True lowerCamelCase :str = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Tuple = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowerCamelCase :Optional[int] = len(__snake_case ) # Check attention is always last and order is fine lowerCamelCase :Union[str, Any] = True lowerCamelCase :List[Any] = True lowerCamelCase :Tuple = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Dict = 1 self.assertEqual(out_len + added_hidden_states , len(__snake_case ) ) lowerCamelCase :Dict = outputs.attentions self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def snake_case ( self : List[str] ): def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ): lowerCamelCase :Union[str, Any] = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) ) lowerCamelCase :Optional[Any] = outputs.hidden_states lowerCamelCase :Any = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__snake_case ) , __snake_case ) # YOLOS has a different seq_length lowerCamelCase :List[str] = self.model_tester.expected_seq_len self.assertListEqual( 
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase :Union[str, Any] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase :Any = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*__snake_case ) @slow def snake_case ( self : Dict ): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def _lowerCamelCase ( ): lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): @cached_property def snake_case ( self : Tuple ): return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None @slow def snake_case ( self : Dict ): lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case ) lowerCamelCase :Optional[Any] = self.default_image_processor lowerCamelCase :str = prepare_img() lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case ) # forward pass with torch.no_grad(): lowerCamelCase :Optional[Any] = model(inputs.pixel_values ) # verify outputs lowerCamelCase :int = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , __snake_case ) lowerCamelCase :Any = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , ) lowerCamelCase :Any = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) ) # verify postprocessing lowerCamelCase :List[str] = image_processor.post_process_object_detection( __snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case ) lowerCamelCase :str = [75, 75, 17, 63, 17] lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case ) self.assertEqual(len(results['''scores'''] ) , 5 ) self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) ) self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case ) self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
style_context_codestyle: 49
label: 1
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _lowerCAmelCase : def __init__( self : int ): lowerCamelCase :str = '''''' lowerCamelCase :Tuple = '''''' lowerCamelCase :Any = [] lowerCamelCase :Union[str, Any] = 0 lowerCamelCase :Tuple = 256 lowerCamelCase :Optional[Any] = 0 lowerCamelCase :Tuple = 0 lowerCamelCase :Union[str, Any] = 0 lowerCamelCase :Any = 0 def snake_case ( self : Optional[int] , __snake_case : str ): lowerCamelCase :Dict = cva.imread(__snake_case , 0 ) lowerCamelCase :Dict = copy.deepcopy(self.img ) lowerCamelCase , lowerCamelCase , lowerCamelCase :Union[str, Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' ) lowerCamelCase :str = np.sum(__snake_case ) for i in range(len(__snake_case ) ): lowerCamelCase :str = x[i] / self.k self.sk += prk lowerCamelCase :Union[str, Any] = (self.L - 1) * self.sk if self.rem != 0: lowerCamelCase :Optional[Any] = int(last % last ) lowerCamelCase :Tuple = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__snake_case ) lowerCamelCase :List[str] = int(np.ma.count(self.img ) / self.img[1].size ) lowerCamelCase :List[Any] = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCamelCase :int = self.img[j][i] if num != self.last_list[num]: lowerCamelCase :Optional[int] = self.last_list[num] cva.imwrite('''output_data/output.jpg''' , self.img ) def snake_case ( self : int ): plt.hist(self.img.ravel() , 256 , [0, 256] ) def snake_case ( self : str ): cva.imshow('''Output-Image''' , self.img ) cva.imshow('''Input-Image''' , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": A__ = os.path.join(os.path.basename(__file__), """image_data/input.jpg""") A__ = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
code_codestyle: 49
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Tuple ): lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils ) lowerCamelCase :Dict = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 lowerCamelCase :Any = test_metrics @require_cpu def snake_case ( self : Dict ): debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def snake_case ( self : int ): debug_launcher(self.test_metrics.main ) @require_single_gpu def snake_case ( self : Any ): self.test_metrics.main() @require_multi_gpu def snake_case ( self : Optional[int] ): print(F"Found {torch.cuda.device_count()} devices." ) lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__snake_case , env=os.environ.copy() )
style_context_codestyle: 49
label: 1
from ...configuration_utils import PretrainedConfig class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'bert-generation' def __init__( self : Dict , __snake_case : List[str]=50358 , __snake_case : Union[str, Any]=1024 , __snake_case : int=24 , __snake_case : Tuple=16 , __snake_case : List[Any]=4096 , __snake_case : int="gelu" , __snake_case : List[str]=0.1 , __snake_case : List[str]=0.1 , __snake_case : Dict=512 , __snake_case : int=0.0_2 , __snake_case : Dict=1e-1_2 , __snake_case : int=0 , __snake_case : int=2 , __snake_case : Dict=1 , __snake_case : int="absolute" , __snake_case : List[Any]=True , **__snake_case : List[Any] , ): super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case ) lowerCamelCase :Dict = vocab_size lowerCamelCase :Optional[Any] = hidden_size lowerCamelCase :Optional[Any] = num_hidden_layers lowerCamelCase :Dict = num_attention_heads lowerCamelCase :Optional[int] = hidden_act lowerCamelCase :Union[str, Any] = intermediate_size lowerCamelCase :Any = hidden_dropout_prob lowerCamelCase :Optional[Any] = attention_probs_dropout_prob lowerCamelCase :Optional[Any] = max_position_embeddings lowerCamelCase :Optional[Any] = initializer_range lowerCamelCase :List[str] = layer_norm_eps lowerCamelCase :List[str] = position_embedding_type lowerCamelCase :List[str] = use_cache
code_codestyle: 49
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = '' _UpperCAmelCase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _UpperCAmelCase = None # compression type in fsspec. ex: "gzip" _UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ): super().__init__(self , **__snake_case ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode lowerCamelCase :Optional[Any] = fsspec.open( __snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={ '''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459 '''trust_env''': True, # Enable reading proxy env variables. **(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] ) lowerCamelCase :Dict = ( self.compressed_name[: self.compressed_name.rindex('''.''' )] if '''.''' in self.compressed_name else self.compressed_name ) lowerCamelCase :List[str] = None @classmethod def snake_case ( cls : Any , __snake_case : Any ): # compressed file paths are always relative to the archive root return super()._strip_protocol(__snake_case ).lstrip('''/''' ) def snake_case ( self : Any ): if self.dir_cache is None: lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name} lowerCamelCase :Optional[Any] = {f['''name''']: f} def snake_case ( self : Union[str, Any] , __snake_case : str ): return self.file.open().read() def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ): lowerCamelCase :List[str] = self._strip_protocol(__snake_case ) if mode != "rb": raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" ) return self.file.open() class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'bz2' _UpperCAmelCase = 'bz2' _UpperCAmelCase = '.bz2' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'gzip' _UpperCAmelCase = 'gzip' _UpperCAmelCase = '.gz' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'lz4' _UpperCAmelCase = 'lz4' _UpperCAmelCase = '.lz4' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'xz' _UpperCAmelCase = 'xz' _UpperCAmelCase = '.xz' class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 'zstd' _UpperCAmelCase = 'zstd' _UpperCAmelCase = '.zst' def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ): super().__init__( fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File 
"/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 lowerCamelCase :Tuple = self.file.__enter__ class _lowerCAmelCase : def __init__( self : Dict , __snake_case : Tuple ): lowerCamelCase :Optional[int] = file_ def __enter__( self : Optional[int] ): self._file.__enter__() return self def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ): self._file.__exit__(*__snake_case , **__snake_case ) def __iter__( self : Optional[Any] ): return iter(self._file ) def snake_case ( self : List[Any] ): return next(self._file ) def __getattr__( self : Any , __snake_case : str ): return getattr(self._file , __snake_case ) def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ): return WrappedFile(_enter(*__snake_case , **__snake_case ) ) lowerCamelCase :Dict = fixed_enter
style_context_codestyle: 49
label: 1
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() A__ = logging.get_logger(__name__) A__ = """Hello, World!""" A__ = """en_XX""" def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool): lowerCamelCase :int = Path('''data_bin''') lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , ) xmod.eval() # disable dropout print(a_) lowerCamelCase :Any = xmod.model.encoder.sentence_encoder lowerCamelCase :List[str] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our X-MOD config:''' , a_) lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_) model.eval() # Now let's copy all the weights. # Embeddings lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight lowerCamelCase :List[str] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them. 
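# copy the embedding LayerNorm, then port each encoder layer (self-attention, intermediate, output, adapters) in turn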
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers): # Encoder: start of layer lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i] lowerCamelCase :List[str] = xmod_sent_encoder.layers[i] # self attention lowerCamelCase :Optional[int] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)) ): raise AssertionError('''Dimensions of self-attention weights do not match.''') lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias # self-attention output lowerCamelCase :Optional[int] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError('''Dimensions of self-attention output weights do not match.''') lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias # intermediate lowerCamelCase :Optional[int] = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of intermediate weights do not match.''') lowerCamelCase :int = xmod_layer.fca.weight lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias # output lowerCamelCase :List[str] = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of feed-forward weights do not match.''') lowerCamelCase :str = xmod_layer.fca.weight lowerCamelCase :int = xmod_layer.fca.bias lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()): raise AssertionError('''Lists of language adapters do not match.''') for lang_code, adapter in xmod_layer.adapter_modules.items(): lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code] lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code] lowerCamelCase :List[Any] = from_adapter.fca.weight lowerCamelCase :List[Any] = from_adapter.fca.bias lowerCamelCase :Dict = from_adapter.fca.weight lowerCamelCase :Optional[Any] = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias if classification_head: lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight lowerCamelCase :List[Any] = 
xmod.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight lowerCamelCase :Any = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1 model.roberta.set_default_language(a_) lowerCamelCase :Any = model(a_)[0] if classification_head: lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_)) else: lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0] print(our_output.shape , their_output.shape) lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item() print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7 lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''') if not success: raise Exception('''Something went wRoNg''') Path(a_).mkdir(parents=a_ , exist_ok=a_) print(F"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(a_) if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) A__ = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
code_codestyle: 49
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = LEDTokenizer _UpperCAmelCase = LEDTokenizerFast _UpperCAmelCase = True def snake_case ( self : Any ): super().setUp() lowerCamelCase :Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCamelCase :int = {'''unk_token''': '''<unk>'''} lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__snake_case ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__snake_case ) ) def snake_case ( self : int , **__snake_case : int ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Dict , **__snake_case : Any ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ): return "lower newer", "lower newer" @cached_property def snake_case ( self : Any ): return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def snake_case ( self : int ): return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def snake_case ( self : str ): lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowerCamelCase :List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(__snake_case , __snake_case ) @require_torch def snake_case ( self : Tuple ): lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' ) self.assertIn('''input_ids''' , __snake_case ) self.assertIn('''attention_mask''' , __snake_case ) self.assertNotIn('''labels''' , __snake_case ) self.assertNotIn('''decoder_attention_mask''' , __snake_case ) @require_torch def snake_case ( self : Union[str, Any] ): 
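# targets tokenized with padding='max_length' should come back padded to exactly 32 tokens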
lowerCamelCase :Union[str, Any] = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def snake_case ( self : List[Any] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Optional[Any] = tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def snake_case ( self : Optional[int] ): lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.'''] lowerCamelCase :Any = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' ) lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' ) lowerCamelCase :Optional[int] = inputs['''input_ids'''] lowerCamelCase :Any = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def snake_case ( self : Dict ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.'''] lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case ) lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']] lowerCamelCase :str = tokenizer.pad(__snake_case ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case ) def snake_case ( self : Tuple ): pass def snake_case ( self : Optional[int] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case ) lowerCamelCase :int = '''A, <mask> AllenNLP sentence.''' lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case ) lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 
487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
style_context_codestyle: 49
label: 1
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def _lowerCamelCase ( a_ : Dataset , a_ : Dict[str, str]): lowerCamelCase :str = args.log_outputs lowerCamelCase :int = '''_'''.join(args.dataset.split('''/''') + [args.config, args.split]) # load metric lowerCamelCase :Optional[Any] = load_metric('''wer''') lowerCamelCase :Any = load_metric('''cer''') # compute metrics lowerCamelCase :int = wer.compute(references=result['''target'''] , predictions=result['''prediction''']) lowerCamelCase :str = cer.compute(references=result['''target'''] , predictions=result['''prediction''']) # print & log results lowerCamelCase :List[str] = F"WER: {wer_result}\nCER: {cer_result}" print(a_) with open(F"{dataset_id}_eval_results.txt" , '''w''') as f: f.write(a_) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: lowerCamelCase :str = F"log_{dataset_id}_predictions.txt" lowerCamelCase :Optional[int] = F"log_{dataset_id}_targets.txt" with open(a_ , '''w''') as p, open(a_ , '''w''') as t: # mapping function to write output def write_to_file(a_ : int , a_ : Optional[Any]): p.write(F"{i}" + '''\n''') p.write(batch['''prediction'''] + '''\n''') t.write(F"{i}" + '''\n''') t.write(batch['''target'''] + '''\n''') result.map(a_ , with_indices=a_) def _lowerCamelCase ( a_ : str): lowerCamelCase :Union[str, Any] = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training lowerCamelCase :int = re.sub(a_ , '''''' , text.lower()) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! lowerCamelCase :Any = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: lowerCamelCase :List[Any] = ''' '''.join(text.split(a_)) return text def _lowerCamelCase ( a_ : int): # load dataset lowerCamelCase :Tuple = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=a_) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor lowerCamelCase :str = AutoFeatureExtractor.from_pretrained(args.model_id) lowerCamelCase :Dict = feature_extractor.sampling_rate # resample audio lowerCamelCase :Tuple = dataset.cast_column('''audio''' , Audio(sampling_rate=a_)) # load eval pipeline if args.device is None: lowerCamelCase :str = 0 if torch.cuda.is_available() else -1 lowerCamelCase :List[str] = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device) # map function to decode audio def map_to_pred(a_ : List[str]): lowerCamelCase :str = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s) lowerCamelCase :Any = prediction['''text'''] lowerCamelCase :Optional[int] = normalize_text(batch['''sentence''']) return batch # run inference on all examples lowerCamelCase :Union[str, Any] = dataset.map(a_ , remove_columns=dataset.column_names) # compute and log_results # do not change function below log_results(a_ , a_) if __name__ == "__main__": A__ = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) A__ = parser.parse_args() main(args)
code_codestyle: 49
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) A__ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""LayoutLMv2FeatureExtractor"""] A__ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 49
label: 1
import unittest import torch from torch import nn from diffusers.models.activations import get_activation class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Union[str, Any] ): lowerCamelCase :int = get_activation('''swish''' ) self.assertIsInstance(__snake_case , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Optional[Any] = get_activation('''silu''' ) self.assertIsInstance(__snake_case , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def snake_case ( self : int ): lowerCamelCase :Dict = get_activation('''mish''' ) self.assertIsInstance(__snake_case , nn.Mish ) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def snake_case ( self : List[str] ): lowerCamelCase :Union[str, Any] = get_activation('''gelu''' ) self.assertIsInstance(__snake_case , nn.GELU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
code_codestyle: 49
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : @staticmethod def snake_case ( *__snake_case : str , **__snake_case : str ): pass @is_pipeline_test @require_vision class _lowerCAmelCase ( unittest.TestCase ): @require_torch def snake_case ( self : Union[str, Any] ): lowerCamelCase :Optional[int] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__snake_case ) , [ [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}], [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}], ] , ) lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], ] , ) @require_tf def snake_case ( self : Tuple ): lowerCamelCase :Tuple = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , ) lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 
0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )}, ], ] , ) @slow @require_torch def snake_case ( self : Any ): lowerCamelCase :str = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__snake_case ) , [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ] , ) lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case ( self : Optional[Any] ): lowerCamelCase :Union[str, Any] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__snake_case ) , [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ] , ) lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ], ] * 5 , )
style_context_codestyle: 49
label: 1
import heapq import sys import numpy as np A__ = tuple[int, int] class _lowerCAmelCase : def __init__( self : str ): lowerCamelCase :int = [] lowerCamelCase :List[str] = set() def snake_case ( self : List[str] ): if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def snake_case ( self : int ): return len(self.elements ) == 0 def snake_case ( self : str , __snake_case : Union[str, Any] , __snake_case : Any ): if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__snake_case ) else: # update # print("update", item) lowerCamelCase :Union[str, Any] = [] ((lowerCamelCase) , (lowerCamelCase)) :str = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((lowerCamelCase) , (lowerCamelCase)) :Tuple = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def snake_case ( self : List[str] , __snake_case : str ): if item in self.set: self.set.remove(__snake_case ) lowerCamelCase :Any = [] ((lowerCamelCase) , (lowerCamelCase)) :Optional[Any] = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((lowerCamelCase) , (lowerCamelCase)) :int = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def snake_case ( self : str ): return self.elements[0][1] def snake_case ( self : List[str] ): ((lowerCamelCase) , (lowerCamelCase)) :str = heapq.heappop(self.elements ) self.set.remove(__snake_case ) return (priority, item) def _lowerCamelCase ( a_ : TPos , a_ : TPos): # euclidean distance lowerCamelCase :Optional[int] = np.array(a_) lowerCamelCase :Union[str, Any] = np.array(a_) return np.linalg.norm(a - b) def _lowerCamelCase ( a_ : TPos , a_ : TPos): # integer division by time variable return consistent_heuristic(a_ , a_) // t def _lowerCamelCase ( a_ : TPos , a_ : TPos): # manhattan distance return abs(p[0] - goal[0]) + abs(p[1] - goal[1]) def _lowerCamelCase ( a_ : TPos , a_ : int , a_ : TPos , a_ : dict[TPos, float]): lowerCamelCase :Optional[int] = g_function[start] + Wa * heuristics[i](a_ , a_) return ans def _lowerCamelCase ( a_ : Optional[Any] , a_ : Tuple , a_ : str): lowerCamelCase :List[str] = np.chararray((n, n)) for i in range(a_): for j in range(a_): lowerCamelCase :Dict = '''*''' for i in range(a_): for j in range(a_): if (j, (n - 1) - i) in blocks: lowerCamelCase :str = '''#''' lowerCamelCase :Tuple = '''-''' lowerCamelCase :Optional[int] = back_pointer[goal] while x != start: ((lowerCamelCase) , (lowerCamelCase)) :List[str] = x # print(x) lowerCamelCase :int = '''-''' lowerCamelCase :Dict = back_pointer[x] lowerCamelCase :Optional[Any] = '''-''' for i in range(a_): for j in range(a_): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''') print('''<-- End position''' , end=''' ''') else: print(grid[i][j] , end=''' ''') print() print('''^''') print('''Start position''') print() print('''# is an obstacle''') print('''- is the path taken by algorithm''') print('''PATH TAKEN BY THE ALGORITHM IS:-''') lowerCamelCase :str = back_pointer[goal] while x != start: print(a_ , end=''' ''') lowerCamelCase :List[str] = back_pointer[x] print(a_) sys.exit() def _lowerCamelCase ( a_ : TPos): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def _lowerCamelCase ( a_ : Any , a_ : Optional[int] , a_ : Optional[int] , a_ : Dict , a_ : Dict , a_ : Tuple , a_ : str , a_ : Any , ): for itera in range(a_): open_list[itera].remove_element(a_) # print("s", s) # 
print("j", j) ((lowerCamelCase) , (lowerCamelCase)) :List[str] = s lowerCamelCase :Tuple = (x - 1, y) lowerCamelCase :Union[str, Any] = (x + 1, y) lowerCamelCase :Optional[Any] = (x, y + 1) lowerCamelCase :int = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(a_) and neighbours not in visited: # print("neighbour", neighbours) visited.add(a_) lowerCamelCase :Optional[int] = -1 lowerCamelCase :List[Any] = float('''inf''') if valid(a_) and g_function[neighbours] > g_function[s] + 1: lowerCamelCase :Dict = g_function[s] + 1 lowerCamelCase :Any = s if neighbours not in close_list_anchor: open_list[0].put(a_ , key(a_ , 0 , a_ , a_)) if neighbours not in close_list_inad: for var in range(1 , a_): if key(a_ , a_ , a_ , a_) <= Wa * key( a_ , 0 , a_ , a_): open_list[j].put( a_ , key(a_ , a_ , a_ , a_)) def _lowerCamelCase ( ): lowerCamelCase :int = [] for x in range(1 , 5): for y in range(1 , 6): some_list.append((x, y)) for x in range(15 , 20): some_list.append((x, 17)) for x in range(10 , 19): for y in range(1 , 15): some_list.append((x, y)) # L block for x in range(1 , 4): for y in range(12 , 19): some_list.append((x, y)) for x in range(3 , 13): for y in range(16 , 19): some_list.append((x, y)) return some_list A__ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} A__ = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] A__ = make_common_ground() A__ = blocks_blk # hyper parameters A__ = 1 A__ = 1 A__ = 20 A__ = 3 # one consistent and two other inconsistent # start and end destination A__ = (0, 0) A__ = (n - 1, n - 1) A__ = 1 def _lowerCamelCase ( a_ : TPos , a_ : TPos , a_ : int): lowerCamelCase :Dict = {start: 0, goal: float('''inf''')} lowerCamelCase :Optional[Any] = {start: -1, goal: -1} lowerCamelCase :Optional[Any] = [] lowerCamelCase :Any = set() for i in range(a_): open_list.append(PriorityQueue()) open_list[i].put(a_ , key(a_ , a_ , a_ , a_)) lowerCamelCase :list[int] = [] lowerCamelCase :list[int] = [] while open_list[0].minkey() < float('''inf'''): for i in range(1 , a_): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf'''): do_something(a_ , a_ , a_) else: lowerCamelCase , lowerCamelCase :Optional[int] = open_list[i].top_show() visited.add(a_) expand_state( a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) close_list_inad.append(a_) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf'''): do_something(a_ , a_ , a_) else: lowerCamelCase :List[Any] = open_list[0].top_show() visited.add(a_) expand_state( a_ , 0 , a_ , a_ , a_ , a_ , a_ , a_ , ) close_list_anchor.append(a_) print('''No path found to goal''') print() for i in range(n - 1 , -1 , -1): for j in range(a_): if (j, i) in blocks: print('''#''' , end=''' ''') elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''') else: print('''-''' , end=''' ''') else: print('''*''' , end=''' ''') if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''') print() print('''^''') print('''Start position''') print() print('''# is an obstacle''') print('''- is the path taken by algorithm''') if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
49
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
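A worked example for the evaluator above: the postfix string "5 6 9 * +" means 5 + (6 * 9) = 59.

print(solve("5 6 9 * +".split(" ")))  # prints the trace table, then 59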
49
1
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _lowerCAmelCase : _UpperCAmelCase = PegasusConfig _UpperCAmelCase = {} _UpperCAmelCase = 'gelu' def __init__( self : Optional[Any] , __snake_case : Tuple , __snake_case : Tuple=13 , __snake_case : Optional[Any]=7 , __snake_case : Union[str, Any]=True , __snake_case : List[str]=False , __snake_case : Dict=99 , __snake_case : List[str]=32 , __snake_case : List[str]=2 , __snake_case : Any=4 , __snake_case : Union[str, Any]=37 , __snake_case : Dict=0.1 , __snake_case : int=0.1 , __snake_case : Optional[Any]=40 , __snake_case : str=2 , __snake_case : Optional[int]=1 , __snake_case : Union[str, Any]=0 , ): lowerCamelCase :str = parent lowerCamelCase :Any = batch_size lowerCamelCase :Tuple = seq_length lowerCamelCase :Optional[int] = is_training lowerCamelCase :Optional[Any] = use_labels lowerCamelCase :int = vocab_size lowerCamelCase :Optional[int] = hidden_size lowerCamelCase :Any = num_hidden_layers lowerCamelCase :Optional[int] = num_attention_heads lowerCamelCase :List[str] = intermediate_size lowerCamelCase :Any = hidden_dropout_prob lowerCamelCase :int = attention_probs_dropout_prob lowerCamelCase :str = max_position_embeddings lowerCamelCase :Union[str, Any] = eos_token_id lowerCamelCase :List[str] = pad_token_id lowerCamelCase :Union[str, Any] = bos_token_id def snake_case ( self : Optional[Any] ): lowerCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCamelCase :str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCamelCase :List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCamelCase :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase :Optional[int] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCamelCase :int = prepare_pegasus_inputs_dict(__snake_case , __snake_case , __snake_case ) return config, inputs_dict def snake_case ( self : str , __snake_case : Optional[int] , __snake_case : Dict ): lowerCamelCase :List[Any] = TFPegasusModel(config=__snake_case ).get_decoder() lowerCamelCase :Optional[int] = inputs_dict['''input_ids'''] lowerCamelCase :str = input_ids[:1, :] lowerCamelCase :Any = inputs_dict['''attention_mask'''][:1, :] lowerCamelCase :Tuple = inputs_dict['''head_mask'''] lowerCamelCase :str = 1 # first forward pass lowerCamelCase :Optional[Any] = 
model(__snake_case , attention_mask=__snake_case , head_mask=__snake_case , use_cache=__snake_case ) lowerCamelCase , lowerCamelCase :Any = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase :int = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase :Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCamelCase :Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCamelCase :Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCamelCase :int = model(__snake_case , attention_mask=__snake_case )[0] lowerCamelCase :int = model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCamelCase :List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCamelCase :Optional[Any] = output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase :str = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__snake_case , __snake_case , rtol=1e-3 ) def _lowerCamelCase ( a_ : Dict , a_ : Optional[Any] , a_ : str , a_ : int=None , a_ : str=None , a_ : Any=None , a_ : Tuple=None , a_ : Union[str, Any]=None , ): if attention_mask is None: lowerCamelCase :Optional[int] = tf.cast(tf.math.not_equal(a_ , config.pad_token_id) , tf.inta) if decoder_attention_mask is None: lowerCamelCase :int = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta), ] , axis=-1 , ) if head_mask is None: lowerCamelCase :List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: lowerCamelCase :Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: lowerCamelCase :Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _UpperCAmelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () _UpperCAmelCase = ( { 'conversational': TFPegasusForConditionalGeneration, 'feature-extraction': TFPegasusModel, 'summarization': TFPegasusForConditionalGeneration, 'text2text-generation': TFPegasusForConditionalGeneration, 'translation': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = False def snake_case ( self : Any ): lowerCamelCase :Any = TFPegasusModelTester(self ) lowerCamelCase :Union[str, Any] = ConfigTester(self , config_class=__snake_case ) def snake_case ( self : Any ): self.config_tester.run_common_tests() def snake_case ( self : Tuple ): lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__snake_case ) @require_sentencepiece @require_tokenizers @require_tf class _lowerCAmelCase ( unittest.TestCase ): _UpperCAmelCase = [ ' 
PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] _UpperCAmelCase = [ 'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to' ' reduce the risk of wildfires.', 'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.', ] # differs slightly from pytorch, likely due to numerical differences in linear layers _UpperCAmelCase = 'google/pegasus-xsum' @cached_property def snake_case ( self : List[str] ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def snake_case ( self : List[str] ): lowerCamelCase :Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def snake_case ( self : int , **__snake_case : Optional[int] ): lowerCamelCase :List[str] = self.translate_src_text(**__snake_case ) assert self.expected_text == generated_words def snake_case ( self : Any , **__snake_case : Union[str, Any] ): lowerCamelCase :Tuple = self.tokenizer(self.src_text , **__snake_case , padding=__snake_case , return_tensors='''tf''' ) lowerCamelCase :List[Any] = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__snake_case , ) lowerCamelCase :Tuple = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__snake_case ) return generated_words @slow def snake_case ( self : Any ): self._assert_generated_batch_equal_expected()
49
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() A__ = logging.get_logger(__name__) A__ = """Hello, World!""" A__ = """en_XX""" def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool): lowerCamelCase :int = Path('''data_bin''') lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , ) xmod.eval() # disable dropout print(a_) lowerCamelCase :Any = xmod.model.encoder.sentence_encoder lowerCamelCase :List[str] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our X-MOD config:''' , a_) lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_) model.eval() # Now let's copy all the weights. # Embeddings lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight lowerCamelCase :List[str] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them. 
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers): # Encoder: start of layer lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i] lowerCamelCase :List[str] = xmod_sent_encoder.layers[i] # self attention lowerCamelCase :Optional[int] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)) ): raise AssertionError('''Dimensions of self-attention weights do not match.''') lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias # self-attention output lowerCamelCase :Optional[int] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError('''Dimensions of self-attention output weights do not match.''') lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias # intermediate lowerCamelCase :Optional[int] = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of intermediate weights do not match.''') lowerCamelCase :int = xmod_layer.fca.weight lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias # output lowerCamelCase :List[str] = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of feed-forward weights do not match.''') lowerCamelCase :str = xmod_layer.fca.weight lowerCamelCase :int = xmod_layer.fca.bias lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()): raise AssertionError('''Lists of language adapters do not match.''') for lang_code, adapter in xmod_layer.adapter_modules.items(): lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code] lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code] lowerCamelCase :List[Any] = from_adapter.fca.weight lowerCamelCase :List[Any] = from_adapter.fca.bias lowerCamelCase :Dict = from_adapter.fca.weight lowerCamelCase :Optional[Any] = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias if classification_head: lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight lowerCamelCase :List[Any] = 
xmod.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight lowerCamelCase :Any = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1 model.roberta.set_default_language(a_) lowerCamelCase :Any = model(a_)[0] if classification_head: lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_)) else: lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0] print(our_output.shape , their_output.shape) lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item() print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7 lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''') if not success: raise Exception('''Something went wRoNg''') Path(a_).mkdir(parents=a_ , exist_ok=a_) print(F"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(a_) if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) A__ = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
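Once converted, the checkpoint should be loadable back through the standard API; the output path below is an illustrative assumption, and set_default_language is the documented way to pick the adapter language on X-MOD models.

from transformers import XmodForMaskedLM

# assumed path: wherever --pytorch_dump_folder_path pointed
model = XmodForMaskedLM.from_pretrained("path/to/converted-xmod")
model.set_default_language("en_XX")  # route inputs through the English adapter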
49
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
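A rough sampling sketch for the scheduler above. The score network here is a trivial stand-in (an assumption, not a real model), and shapes and step counts are illustrative:

import torch

scheduler = ScoreSdeVpScheduler()
scheduler.set_timesteps(num_inference_steps=100)

score_model = lambda sample, t: -sample  # placeholder score network, for illustration only

sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    score = score_model(sample, t)
    sample, sample_mean = scheduler.step_pred(score, sample, t)
# sample_mean is the estimate before the final noise injection of each step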
49
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
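A minimal usage sketch for the config class above (argument values are illustrative):

config = RobertaPreLayerNormConfig(num_hidden_layers=6, hidden_size=384)
print(config.to_json_string())  # JSON serialization inherited from PretrainedConfig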
49
1
import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def _lowerCamelCase ( a_ : Any): lowerCamelCase :Any = int(a_) lowerCamelCase , lowerCamelCase , lowerCamelCase :Optional[int] = t // 36_00, (t // 60) % 60, t % 60 return F"{h}:{m:02d}:{s:02d}" if h != 0 else F"{m:02d}:{s:02d}" def _lowerCamelCase ( a_ : Any , a_ : Union[str, Any] , a_ : Dict , a_ : List[str] , a_ : List[str]=3_00): # docstyle-ignore return F"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n " def _lowerCamelCase ( a_ : int): lowerCamelCase :int = '''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F" <th>{i}</th>\n" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: lowerCamelCase :List[str] = F"{elt:.6f}" if isinstance(a_ , a_) else str(a_) html_code += F" <td>{elt}</td>\n" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class _lowerCAmelCase : _UpperCAmelCase = 5 _UpperCAmelCase = 0.2 def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[str] = None , __snake_case : bool = True , __snake_case : Optional["NotebookTrainingTracker"] = None , __snake_case : int = 300 , ): lowerCamelCase :Dict = total lowerCamelCase :Tuple = '''''' if prefix is None else prefix lowerCamelCase :Tuple = leave lowerCamelCase :List[Any] = parent lowerCamelCase :int = width lowerCamelCase :Optional[Any] = None lowerCamelCase :str = None lowerCamelCase :List[Any] = None def snake_case ( self : Optional[int] , __snake_case : int , __snake_case : bool = False , __snake_case : str = None ): lowerCamelCase :Tuple = value if comment is not None: lowerCamelCase :int = comment if self.last_value is None: lowerCamelCase :Dict = time.time() lowerCamelCase :List[str] = value lowerCamelCase :Dict = None lowerCamelCase :Tuple = self.warmup lowerCamelCase :Dict = 1 self.update_bar(__snake_case ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 lowerCamelCase :Dict = time.time() lowerCamelCase :int = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: lowerCamelCase :Optional[int] = self.elapsed_time / (value - self.start_value) else: lowerCamelCase :Optional[int] = None if value >= self.total: lowerCamelCase :List[str] = self.total lowerCamelCase :List[str] = None if not self.leave: self.close() elif self.average_time_per_item is not None: lowerCamelCase :List[str] = self.average_time_per_item * (self.total - value) self.update_bar(__snake_case ) lowerCamelCase :Optional[Any] = value lowerCamelCase :Tuple = current_time if self.average_time_per_item is None: lowerCamelCase :Optional[int] = 1 else: lowerCamelCase :Union[str, Any] = max(int(self.update_every / self.average_time_per_item ) , 1 ) def snake_case ( self : Optional[Any] , __snake_case : Tuple , __snake_case : List[Any]=None ): lowerCamelCase :List[Any] = ''' ''' * (len(str(self.total ) ) - len(str(__snake_case ) )) + str(__snake_case ) if self.elapsed_time is None: lowerCamelCase :str = F"[{spaced_value}/{self.total} : < :" elif self.predicted_remaining is None: lowerCamelCase :Optional[Any] = F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}" else: lowerCamelCase :str = ( F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <" F" {format_time(self.predicted_remaining )}" ) self.label += F", {1/self.average_time_per_item:.2f} it/s" self.label += "]" if self.comment is None or len(self.comment ) == 0 else F", {self.comment}]" self.display() def snake_case ( self : Union[str, Any] ): lowerCamelCase :Dict = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: lowerCamelCase :Union[str, Any] = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case ) else: self.output.update(disp.HTML(self.html_code ) ) def snake_case ( self : str ): if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''' ) ) class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : int , __snake_case : Optional[int] , __snake_case : List[Any]=None ): super().__init__(__snake_case ) lowerCamelCase :List[Any] = None if column_names is None else [column_names] lowerCamelCase :Optional[int] = None def snake_case ( self : List[Any] ): lowerCamelCase :int = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: lowerCamelCase :List[Any] = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case ) else: self.output.update(disp.HTML(self.html_code ) ) def snake_case ( self : Union[str, Any] , __snake_case : Tuple ): if self.inner_table is None: lowerCamelCase :Union[str, Any] = [list(values.keys() ), list(values.values() )] else: lowerCamelCase :Optional[int] = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(__snake_case ) lowerCamelCase :List[Any] = columns self.inner_table.append([values[c] for c in columns] ) def snake_case ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : List[Any]=None , __snake_case : Tuple=300 ): lowerCamelCase :Any = NotebookProgressBar(__snake_case , prefix=__snake_case , parent=self , width=__snake_case ) return self.child_bar def 
snake_case ( self : Optional[Any] ): lowerCamelCase :int = None self.display() class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : Any ): lowerCamelCase :Optional[Any] = None lowerCamelCase :int = None lowerCamelCase :Optional[int] = False def snake_case ( self : Any , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Any , **__snake_case : Optional[int] ): lowerCamelCase :int = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' lowerCamelCase :List[Any] = 0 lowerCamelCase :List[str] = 0 lowerCamelCase :Tuple = [self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''' ) lowerCamelCase :Optional[Any] = NotebookTrainingTracker(state.max_steps , __snake_case ) def snake_case ( self : Optional[Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , **__snake_case : Any ): lowerCamelCase :List[Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F"{state.epoch:.2f}" self.training_tracker.update( state.global_step + 1 , comment=F"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , ) lowerCamelCase :Optional[int] = False def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : str , __snake_case : int=None , **__snake_case : Optional[Any] ): if not has_length(__snake_case ): return if self.prediction_bar is None: if self.training_tracker is not None: lowerCamelCase :List[str] = self.training_tracker.add_child(len(__snake_case ) ) else: lowerCamelCase :str = NotebookProgressBar(len(__snake_case ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def snake_case ( self : List[str] , __snake_case : Dict , __snake_case : Any , __snake_case : str , **__snake_case : List[Any] ): if self.prediction_bar is not None: self.prediction_bar.close() lowerCamelCase :List[str] = None def snake_case ( self : int , __snake_case : str , __snake_case : int , __snake_case : str , __snake_case : Any=None , **__snake_case : Tuple ): # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: lowerCamelCase :Tuple = {'''Training Loss''': logs['''loss''']} # First column is necessarily Step sine we're not in epoch eval strategy lowerCamelCase :Dict = state.global_step self.training_tracker.write_line(__snake_case ) def snake_case ( self : int , __snake_case : List[str] , __snake_case : List[str] , __snake_case : str , __snake_case : Union[str, Any]=None , **__snake_case : str ): if self.training_tracker is not None: lowerCamelCase :Optional[Any] = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history ): if "loss" in log: lowerCamelCase :Any = log['''loss'''] break if self.first_column == "Epoch": lowerCamelCase :Tuple = int(state.epoch ) else: lowerCamelCase :Optional[int] = state.global_step lowerCamelCase :Tuple = '''eval''' for k in metrics: if k.endswith('''_loss''' ): lowerCamelCase :Union[str, Any] = re.sub(R'''\_loss$''' , '''''' , __snake_case ) lowerCamelCase :List[Any] = metrics.pop('''total_flos''' , __snake_case ) lowerCamelCase :Union[str, Any] = metrics.pop('''epoch''' , __snake_case ) lowerCamelCase :List[Any] = metrics.pop(F"{metric_key_prefix}_runtime" , __snake_case ) lowerCamelCase :Dict = metrics.pop(F"{metric_key_prefix}_samples_per_second" , __snake_case ) lowerCamelCase :List[Any] = 
metrics.pop(F"{metric_key_prefix}_steps_per_second" , __snake_case ) lowerCamelCase :List[str] = metrics.pop(F"{metric_key_prefix}_jit_compilation_time" , __snake_case ) for k, v in metrics.items(): if k == F"{metric_key_prefix}_loss": lowerCamelCase :List[str] = v else: lowerCamelCase :Any = k.split('''_''' ) lowerCamelCase :Optional[Any] = ''' '''.join([part.capitalize() for part in splits[1:]] ) lowerCamelCase :Union[str, Any] = v self.training_tracker.write_line(__snake_case ) self.training_tracker.remove_child() lowerCamelCase :Dict = None # Evaluation takes a long time so we should force the next update. lowerCamelCase :List[Any] = True def snake_case ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Tuple , **__snake_case : Tuple ): self.training_tracker.update( state.global_step , comment=F"Epoch {int(state.epoch )}/{state.num_train_epochs}" , force_update=__snake_case ) lowerCamelCase :Any = None
49
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase = DebertaTokenizer _UpperCAmelCase = True _UpperCAmelCase = DebertaTokenizerFast def snake_case ( self : List[str] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase :Dict = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''} lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__snake_case ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__snake_case ) ) def snake_case ( self : str , **__snake_case : Dict ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def snake_case ( self : Optional[Any] , __snake_case : int ): lowerCamelCase :List[Any] = '''lower newer''' lowerCamelCase :List[str] = '''lower newer''' return input_text, output_text def snake_case ( self : str ): lowerCamelCase :Optional[int] = self.get_tokenizer() lowerCamelCase :Union[str, Any] = '''lower newer''' lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) lowerCamelCase :List[str] = tokens + [tokenizer.unk_token] lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case ) def snake_case ( self : Optional[int] ): lowerCamelCase :List[str] = self.get_tokenizer() lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' ) lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __snake_case ) @slow def snake_case ( self : str ): lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case ) lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case ) lowerCamelCase :Union[str, Any] = tokenizer.encode( '''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case ) lowerCamelCase :str = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case ) lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case ) lowerCamelCase :Dict = 
tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def snake_case ( self : str ): lowerCamelCase :List[str] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) lowerCamelCase :Tuple = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case ) lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']] # fmt: off lowerCamelCase :Any = { '''input_ids''': [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on lowerCamelCase :Optional[int] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __snake_case ) for expected, decoded in zip(__snake_case , __snake_case ): self.assertEqual(__snake_case , __snake_case )
49
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
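A quick segment-id check for the pair encoding above (assumes the hosted files for this deprecated checkpoint are still downloadable):

tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
enc = tok("how are you?", "fine, thanks")
print(enc["token_type_ids"])  # 0s for [CLS] + first segment + [SEP], 1s for the second segment + [SEP]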
49
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) A__ = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class _lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ): lowerCamelCase :Tuple = None lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) lowerCamelCase :Optional[int] = os.path.abspath('''examples''' ) for item in os.listdir(__snake_case ): if item not in EXCLUDE_EXAMPLES: lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ) and ".py" in item_path: with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ): lowerCamelCase :Union[str, Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case ) lowerCamelCase :int = '''\n'''.join(__snake_case ) if special_strings is not None: for string in special_strings: lowerCamelCase :int = diff.replace(__snake_case , '''''' ) self.assertEqual(__snake_case , '''''' ) def snake_case ( self : Dict ): self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) self.one_complete_example('''complete_nlp_example.py''' , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) lowerCamelCase :Optional[int] = [ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = False @classmethod def snake_case ( cls : Optional[Any] ): super().setUpClass() lowerCamelCase :Any = tempfile.mkdtemp() lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case ( cls : Dict ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case ( self : int ): lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n 
--checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def snake_case ( self : List[Any] ): lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() lowerCamelCase :List[Any] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def snake_case ( self : List[str] ): lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split() lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) def snake_case ( self : str ): lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split() lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case ) if torch.cuda.is_available(): lowerCamelCase :Union[str, Any] = torch.cuda.device_count() else: lowerCamelCase :Dict = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) else: self.assertIn('''epoch 0:''' , __snake_case ) self.assertIn('''epoch 1:''' , __snake_case ) @slow def snake_case ( self : Any ): lowerCamelCase :Tuple = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case ) lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case ) lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1] lowerCamelCase :List[str] = ast.literal_eval(__snake_case ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def snake_case ( self : int ): lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case ( self : Any ): with tempfile.TemporaryDirectory() as tmpdir: lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) ) def snake_case ( self : Tuple ): lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def snake_case ( self : Optional[Any] ): lowerCamelCase :int = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
49
1
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
49
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
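For reference, a hand-computed local binary pattern value on one assumed 3x3 window (thresholding the eight neighbors against the center and weighting them by powers of two, clockwise from the top-left neighbor):

import numpy as np

window = np.array([[5, 9, 1], [4, 6, 7], [2, 3, 8]])  # assumed sample window, center = 6
center = window[1][1]
neighbors = [window[0][0], window[0][1], window[0][2], window[1][2],
             window[2][2], window[2][1], window[2][0], window[1][0]]
lbp_value = sum(int(p >= center) << k for k, p in enumerate(neighbors))
print(lbp_value)  # 9, 7 and 8 pass the threshold -> 2**1 + 2**3 + 2**4 = 26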
49
1
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class _lowerCAmelCase ( datasets.BeamBasedBuilder ):
    def snake_case ( self : str ):
        return datasets.DatasetInfo(
            features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=__snake_case , )

    def snake_case ( self : int , __snake_case : Optional[Any] , __snake_case : Optional[Any] ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]

    def snake_case ( self : int , __snake_case : int , __snake_case : Dict ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(__snake_case )


class _lowerCAmelCase ( datasets.BeamBasedBuilder ):
    def snake_case ( self : Union[str, Any] ):
        return datasets.DatasetInfo(
            features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=__snake_case , )

    def snake_case ( self : Optional[int] , __snake_case : Dict , __snake_case : Union[str, Any] ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
        ]

    def snake_case ( self : int , __snake_case : Optional[Any] , __snake_case : Dict ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(__snake_case )


def _lowerCamelCase ( ):
    return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''])]


def _lowerCamelCase ( ):
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''])]


class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    @require_beam
    def snake_case ( self : str ):
        lowerCamelCase :Any = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            lowerCamelCase :List[str] = DummyBeamDataset(cache_dir=__snake_case , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , F"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            lowerCamelCase :int = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , __snake_case )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __snake_case )
            self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def snake_case ( self : Optional[int] ):
        import apache_beam as beam

        lowerCamelCase :Optional[Any] = beam.io.parquetio.WriteToParquet

        lowerCamelCase :Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            lowerCamelCase :Tuple = DummyBeamDataset(cache_dir=__snake_case , beam_runner='''DirectRunner''' )
            with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
                lowerCamelCase :Optional[Any] = partial(__snake_case , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        __snake_case , builder.name , '''default''' , '''0.0.0''' , F"{builder.name}-train-00000-of-00002.arrow" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        __snake_case , builder.name , '''default''' , '''0.0.0''' , F"{builder.name}-train-00001-of-00002.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            lowerCamelCase :int = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , __snake_case )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __snake_case )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
            self.assertTrue(
                os.path.exists(os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def snake_case ( self : Any ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            lowerCamelCase :Any = DummyBeamDataset(cache_dir=__snake_case )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def snake_case ( self : Optional[int] ):
        lowerCamelCase :int = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            lowerCamelCase :Tuple = NestedBeamDataset(cache_dir=__snake_case , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , F"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
            lowerCamelCase :Tuple = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , __snake_case )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __snake_case )
            self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset
49
import os
from math import logaa


def _lowerCamelCase ( a_ : str = "base_exp.txt"):
    lowerCamelCase :float = 0
    lowerCamelCase :Optional[int] = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
        lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
        if x * logaa(a_) > largest:
            lowerCamelCase :List[Any] = x * logaa(a_)
            lowerCamelCase :Any = i + 1
    return result


if __name__ == "__main__":
    print(solution())
49
1
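The `base_exp` sample above works because log10 is monotonic: log10(b**e) = e * log10(b), so ranking the pairs by e * log10(b) ranks the powers themselves without ever materialising the huge numbers. A self-contained illustration of the same comparison (the pairs below are small illustrative inputs, not the sample's data file):

from math import log10

# (base, exponent) pairs; the largest e * log10(b) marks the largest b**e
pairs = [(2, 11), (3, 7), (6, 3)]  # values: 2048, 2187, 216
best = max(range(len(pairs)), key=lambda i: pairs[i][1] * log10(pairs[i][0]))
print(best + 1)  # 1-indexed line number of the winner -> 2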
import functools


def _lowerCamelCase ( a_ : list[int] , a_ : list[int]):
    # Validation
    if not isinstance(a_ , a_) or not all(isinstance(a_ , a_) for day in days):
        raise ValueError('''The parameter days should be a list of integers''')

    if len(a_) != 3 or not all(isinstance(a_ , a_) for cost in costs):
        raise ValueError('''The parameter costs should be a list of three integers''')

    if len(a_) == 0:
        return 0

    if min(a_) <= 0:
        raise ValueError('''All days elements should be greater than 0''')

    if max(a_) >= 3_66:
        raise ValueError('''All days elements should be less than 366''')

    lowerCamelCase :Any = set(a_)

    @functools.cache
    def dynamic_programming(a_ : int) -> int:
        if index > 3_65:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1) ,
            costs[1] + dynamic_programming(index + 7) ,
            costs[2] + dynamic_programming(index + 30) , )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
def _lowerCamelCase ( a_ : list):
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')

    for cell_n in range(1 , len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    lowerCamelCase :Any = grid[0]

    for row_n in range(1 , len(a_)):
        lowerCamelCase :List[str] = grid[row_n]
        lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
        lowerCamelCase :List[Any] = grid[row_n]

    return grid[-1][-1]


def _lowerCamelCase ( a_ : list , a_ : list):
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(a_)):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])

    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
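Both samples in the record above are dynamic programs. As a quick check of the grid routine's recurrence (each cell accumulates the cheapest cost of reaching it moving only right or down), here is a cleaned-up, non-obfuscated sketch of the same idea:

def min_path_sum(grid: list[list[int]]) -> int:
    # DP over a copy of the grid: each cell ends up holding the cheapest cost
    # of reaching it from the top-left corner, moving only right or down.
    rows = [row[:] for row in grid]
    for j in range(1, len(rows[0])):
        rows[0][j] += rows[0][j - 1]
    for i in range(1, len(rows)):
        rows[i][0] += rows[i - 1][0]
        for j in range(1, len(rows[i])):
            rows[i][j] += min(rows[i][j - 1], rows[i - 1][j])
    return rows[-1][-1]


print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7, via 1 -> 3 -> 1 -> 1 -> 1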
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    def snake_case ( self : Any , __snake_case : List[str] , __snake_case : int , __snake_case : Union[str, Any] ):
        self.assertEqual(len(__snake_case ) , len(__snake_case ) )
        for a, b in zip(__snake_case , __snake_case ):
            self.assertAlmostEqual(__snake_case , __snake_case , delta=__snake_case )

    def snake_case ( self : Optional[Any] ):
        lowerCamelCase :Dict = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(__snake_case ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step , 3 )
        self.assertEqual(len(accumulator.gradients ) , 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )

    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :List[Any] = None
        ops.enable_eager_execution_internal()
        lowerCamelCase :Optional[Any] = tf.config.list_physical_devices('''CPU''' )
        if len(__snake_case ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        lowerCamelCase :Union[str, Any] = tf.config.list_logical_devices(device_type='''CPU''' )
        lowerCamelCase :Any = tf.distribute.MirroredStrategy(devices=devices[:2] )

        with strategy.scope():
            lowerCamelCase :Optional[Any] = GradientAccumulator()
            lowerCamelCase :Dict = tf.Variable([4.0, 3.0] )
            lowerCamelCase , lowerCamelCase :List[str] = create_optimizer(5e-5 , 10 , 5 )
            lowerCamelCase :List[str] = tf.Variable([0.0, 0.0] , trainable=__snake_case )

        def accumulate_on_replica(__snake_case : Any ):
            accumulator([gradient] )

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )

        @tf.function
        def accumulate(__snake_case : Union[str, Any] , __snake_case : Optional[int] ):
            with strategy.scope():
                lowerCamelCase :Optional[Any] = strategy.experimental_local_results(__snake_case )
                local_variables[0].assign(__snake_case )
                local_variables[1].assign(__snake_case )
                strategy.run(__snake_case , args=(gradient_placeholder,) )

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(__snake_case )

        def _check_local_values(__snake_case : int , __snake_case : int ):
            lowerCamelCase :List[Any] = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , __snake_case , tol=1e-2 )
            self.assertListAlmostEqual(values[1].value() , __snake_case , tol=1e-2 )

        accumulate([1.0, 2.0] , [-1.0, 1.0] )
        accumulate([3.0, -1.0] , [-1.0, -1.0] )
        accumulate([-2.0, 2.0] , [3.0, -2.0] )
        self.assertEqual(accumulator.step , 3 )
        _check_local_values([2.0, 3.0] , [1.0, -2.0] )

        apply_grad()
        self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )

        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        _check_local_values([0.0, 0.0] , [0.0, 0.0] )
49
import math


def _lowerCamelCase ( a_ : int):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(a_) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _lowerCamelCase ( a_ : float = 0.1):
    lowerCamelCase :Dict = 3
    lowerCamelCase :List[Any] = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
            primes += is_prime(a_)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
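The primality test in the record above relies on the fact that every prime greater than 3 is congruent to 1 or 5 mod 6, so trial division only needs 2, 3, and the 6k +/- 1 candidates up to sqrt(n). A de-obfuscated, self-contained version of that test for quick experimentation:

def is_prime(number: int) -> bool:
    # 6k +/- 1 trial division: after ruling out multiples of 2 and 3,
    # only divisors of the form 6k - 1 and 6k + 1 up to sqrt(n) can remain.
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    i = 5
    while i * i <= number:
        if number % i == 0 or number % (i + 2) == 0:
            return False
        i += 6
    return True


print([n for n in range(30) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]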
from typing import Any


def _lowerCamelCase ( a_ : list , a_ : list , a_ : dict , a_ : dict , a_ : dict , ):
    _validation(
        a_ , a_ , a_ , a_ , a_ , )
    # Creates data structures and fill initial step
    lowerCamelCase :dict = {}
    lowerCamelCase :dict = {}
    for state in states_space:
        lowerCamelCase :Dict = observations_space[0]
        lowerCamelCase :Optional[int] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        lowerCamelCase :str = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(a_)):
        lowerCamelCase :Dict = observations_space[o]
        lowerCamelCase :Tuple = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            lowerCamelCase :Union[str, Any] = ''''''
            lowerCamelCase :List[str] = -1
            for k_state in states_space:
                lowerCamelCase :List[Any] = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    lowerCamelCase :Optional[Any] = probability
                    lowerCamelCase :Optional[int] = k_state

            # Update probabilities and pointers dicts
            lowerCamelCase :str = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            lowerCamelCase :int = arg_max

    # The final observation
    lowerCamelCase :str = observations_space[len(a_) - 1]

    # argmax for given final observation
    lowerCamelCase :str = ''''''
    lowerCamelCase :Optional[Any] = -1
    for k_state in states_space:
        lowerCamelCase :Union[str, Any] = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            lowerCamelCase :str = probability
            lowerCamelCase :List[Any] = k_state
    lowerCamelCase :Optional[Any] = arg_max

    # Process pointers backwards
    lowerCamelCase :List[str] = last_state
    lowerCamelCase :Any = []
    for o in range(len(a_) - 1 , -1 , -1):
        result.append(a_)
        lowerCamelCase :List[Any] = pointers[previous, observations_space[o]]

    result.reverse()

    return result


def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : Any , a_ : Any , a_ : Any , ):
    _validate_not_empty(
        a_ , a_ , a_ , a_ , a_ , )
    _validate_lists(a_ , a_)
    _validate_dicts(
        a_ , a_ , a_)


def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : Any , a_ : Any , a_ : Any , ):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError('''There\'s an empty parameter''')


def _lowerCamelCase ( a_ : Any , a_ : Any):
    _validate_list(a_ , '''observations_space''')
    _validate_list(a_ , '''states_space''')


def _lowerCamelCase ( a_ : Any , a_ : str):
    if not isinstance(_object , a_):
        lowerCamelCase :Dict = F"{var_name} must be a list"
        raise ValueError(a_)
    else:
        for x in _object:
            if not isinstance(a_ , a_):
                lowerCamelCase :List[str] = F"{var_name} must be a list of strings"
                raise ValueError(a_)


def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : Any , ):
    _validate_dict(a_ , '''initial_probabilities''' , a_)
    _validate_nested_dict(a_ , '''transition_probabilities''')
    _validate_nested_dict(a_ , '''emission_probabilities''')


def _lowerCamelCase ( a_ : Any , a_ : str):
    _validate_dict(_object , a_ , a_)
    for x in _object.values():
        _validate_dict(a_ , a_ , a_ , a_)


def _lowerCamelCase ( a_ : Any , a_ : str , a_ : type , a_ : bool = False):
    if not isinstance(_object , a_):
        lowerCamelCase :str = F"{var_name} must be a dict"
        raise ValueError(a_)
    if not all(isinstance(a_ , a_) for x in _object):
        lowerCamelCase :Tuple = F"{var_name} all keys must be strings"
        raise ValueError(a_)
    if not all(isinstance(a_ , a_) for x in _object.values()):
        lowerCamelCase :Dict = '''nested dictionary ''' if nested else ''''''
        lowerCamelCase :Optional[int] = F"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(a_)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
49
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    def snake_case ( self : str ):
        lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
        lowerCamelCase :Optional[Any] = -1
        lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
        lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
        lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )

        with CaptureStdout() as cs:
            lowerCamelCase :str = TextStreamer(__snake_case )
            model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase :Optional[int] = cs.out[:-1]

        self.assertEqual(__snake_case , __snake_case )

    def snake_case ( self : Dict ):
        lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
        lowerCamelCase :List[Any] = -1
        lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
        lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
        lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )

        lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
        lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
        thread.start()
        lowerCamelCase :Any = ''''''
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(__snake_case , __snake_case )

    def snake_case ( self : str ):
        lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
        lowerCamelCase :List[str] = -1
        lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
        lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
        lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
        lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )

        with CaptureStdout() as cs:
            lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
            model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase :int = cs.out[:-1]

        self.assertEqual(__snake_case , __snake_case )

    def snake_case ( self : Optional[int] ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
        lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
        lowerCamelCase :Optional[int] = -1
        lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
            model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowerCamelCase :Tuple = cs.out[:-1]  # Remove the final "\n"
        lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def snake_case ( self : List[Any] ):
        lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
        lowerCamelCase :Optional[int] = -1
        lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )

        lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
        lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(__snake_case ):
            lowerCamelCase :Dict = ''''''
            for new_text in streamer:
                streamer_text += new_text
49
1
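The first sample in the record above is the classic Viterbi decoder for hidden Markov models. A usage sketch with the textbook healthy/fever example, assuming the decoder (whose real name the corpus strips) is bound to `viterbi(observations, states, initial_p, transition_p, emission_p)`:

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial_p = {"Healthy": 0.6, "Fever": 0.4}
transition_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, initial_p, transition_p, emission_p))
# expected most-likely state path: ['Healthy', 'Healthy', 'Fever']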
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def _lowerCamelCase ( a_ : Sequence[float] , a_ : int , a_ : int):
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    lowerCamelCase :Dict = (low + high) // 2

    lowerCamelCase , lowerCamelCase , lowerCamelCase :Union[str, Any] = max_subarray(a_ , a_ , a_)
    lowerCamelCase , lowerCamelCase , lowerCamelCase :Optional[int] = max_subarray(a_ , mid + 1 , a_)
    lowerCamelCase , lowerCamelCase , lowerCamelCase :List[str] = max_cross_sum(a_ , a_ , a_ , a_)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def _lowerCamelCase ( a_ : Sequence[float] , a_ : int , a_ : int , a_ : int):
    lowerCamelCase , lowerCamelCase :str = float('''-inf'''), -1
    lowerCamelCase , lowerCamelCase :Any = float('''-inf'''), -1

    lowerCamelCase :int | float = 0
    for i in range(a_ , low - 1 , -1):
        summ += arr[i]
        if summ > left_sum:
            lowerCamelCase :List[str] = summ
            lowerCamelCase :int = i

    lowerCamelCase :int = 0
    for i in range(mid + 1 , high + 1):
        summ += arr[i]
        if summ > right_sum:
            lowerCamelCase :List[str] = summ
            lowerCamelCase :Dict = i

    return max_left, max_right, (left_sum + right_sum)


def _lowerCamelCase ( a_ : int):
    lowerCamelCase :List[Any] = [randint(1 , a_) for _ in range(a_)]
    lowerCamelCase :int = time.time()
    max_subarray(a_ , 0 , input_size - 1)
    lowerCamelCase :List[Any] = time.time()
    return end - start


def _lowerCamelCase ( ):
    lowerCamelCase :Optional[Any] = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
    lowerCamelCase :Dict = [time_max_subarray(a_) for input_size in input_sizes]
    print('''No of Inputs\t\tTime Taken''')
    for input_size, runtime in zip(a_ , a_):
        print(a_ , '''\t\t''' , a_)
    plt.plot(a_ , a_)
    plt.xlabel('''Number of Inputs''')
    plt.ylabel('''Time taken in seconds''')
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
49
from maths.prime_factors import prime_factors


def _lowerCamelCase ( a_ : int):
    if not isinstance(a_ , a_):
        lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
        raise TypeError(a_)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(a_)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
1
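The divide-and-conquer maximum-subarray sample above runs in O(n log n); for contrast, a minimal O(n) Kadane-style sketch (not part of the original sample) that returns only the best sum:

def kadane(arr: list[int]) -> int:
    # Linear scan: at each element, either extend the running slice or
    # restart it; track the best sum of any non-empty contiguous slice.
    best = current = arr[0]
    for x in arr[1:]:
        current = max(x, current + x)
        best = max(best, current)
    return best


print(kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6, from the slice [4, -1, 2, 1]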
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class _lowerCAmelCase :
        @staticmethod
        def snake_case ( *__snake_case : str , **__snake_case : str ):
            pass


@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    @require_torch
    def snake_case ( self : Union[str, Any] ):
        lowerCamelCase :Optional[int] = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(__snake_case ) ,
            [
                [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
                [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
            ] , )

        lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
            ] , )

    @require_tf
    def snake_case ( self : Tuple ):
        lowerCamelCase :Tuple = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )

        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )

        lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
                ],
            ] , )

    @slow
    @require_torch
    def snake_case ( self : Any ):
        lowerCamelCase :str = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )

        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                {'''score''': 0.5_1_1, '''label''': '''remote'''},
                {'''score''': 0.4_8_5, '''label''': '''cat'''},
                {'''score''': 0.0_0_4, '''label''': '''plane'''},
            ] , )

        lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                [
                    {'''score''': 0.5_1_1, '''label''': '''remote'''},
                    {'''score''': 0.4_8_5, '''label''': '''cat'''},
                    {'''score''': 0.0_0_4, '''label''': '''plane'''},
                ],
            ]
            * 5 , )

    @slow
    @require_tf
    def snake_case ( self : Optional[Any] ):
        lowerCamelCase :Union[str, Any] = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                {'''score''': 0.5_1_1, '''label''': '''remote'''},
                {'''score''': 0.4_8_5, '''label''': '''cat'''},
                {'''score''': 0.0_0_4, '''label''': '''plane'''},
            ] , )

        lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__snake_case ) ,
            [
                [
                    {'''score''': 0.5_1_1, '''label''': '''remote'''},
                    {'''score''': 0.4_8_5, '''label''': '''cat'''},
                    {'''score''': 0.0_0_4, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
49
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
A__ = logging.get_logger(__name__)


def _lowerCamelCase ( a_ : str , a_ : str=False):
    lowerCamelCase :Optional[int] = []

    # fmt: off
    # stem:
    rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
    rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))

    rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
    rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))

    # backbone
    rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
    rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
    rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
                ('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
                ('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
            ])

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ])
    # fmt: on

    return rename_keys


def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            lowerCamelCase :Union[str, Any] = ''''''
        else:
            lowerCamelCase :Optional[int] = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
        lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        lowerCamelCase :Any = in_proj_weight[
            : config.hidden_size, :
        ]
        lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
        lowerCamelCase :int = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowerCamelCase :Tuple = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowerCamelCase :Optional[Any] = in_proj_weight[
            -config.hidden_size :, :
        ]
        lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]


def _lowerCamelCase ( a_ : int):
    lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(a_ , a_)


def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
    lowerCamelCase :Optional[Any] = dct.pop(a_)
    lowerCamelCase :str = val


def _lowerCamelCase ( ):
    lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
    return im


@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
    lowerCamelCase :Optional[int] = BitConfig(
        global_padding='''same''' ,
        layer_type='''bottleneck''' ,
        depths=(3, 4, 9) ,
        out_features=['''stage3'''] ,
        embedding_dynamic_padding=a_ , )
    lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
    lowerCamelCase :List[Any] = False

    # load original model from timm
    lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    lowerCamelCase :List[str] = timm_model.state_dict()
    if base_model:
        remove_classification_head_(a_)
    lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
    for src, dest in rename_keys:
        rename_key(a_ , a_ , a_)
    read_in_q_k_v(a_ , a_ , a_)

    lowerCamelCase :List[str] = '''huggingface/label-files'''
    lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
    lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
    lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
    lowerCamelCase :Optional[int] = idalabel
    lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
    else:
        lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
    model.load_state_dict(a_)

    # create image processor
    lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
    lowerCamelCase :str = transform.transforms

    lowerCamelCase :int = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }

    lowerCamelCase :Any = ViTHybridImageProcessor(
        do_resize=a_ ,
        size={'''shortest_edge''': timm_transforms[0].size} ,
        resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
        do_center_crop=a_ ,
        crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,
        do_normalize=a_ ,
        image_mean=timm_transforms[-1].mean.tolist() ,
        image_std=timm_transforms[-1].std.tolist() , )

    lowerCamelCase :Dict = prepare_img()
    lowerCamelCase :str = transform(a_).unsqueeze(0)
    lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values

    # verify pixel values
    assert torch.allclose(a_ , a_)

    # verify logits
    with torch.no_grad():
        lowerCamelCase :Optional[int] = model(a_)
    lowerCamelCase :Union[str, Any] = outputs.logits

    print('''Predicted class:''' , logits.argmax(-1).item())
    if base_model:
        lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
    else:
        lowerCamelCase :List[str] = timm_model(a_)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(a_ , outputs.logits , atol=1e-3)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        Path(a_).mkdir(exist_ok=a_)
        print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(a_)
        print(F"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(a_)

    if push_to_hub:
        print(F"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(F"ybelkada/{vit_name}")
        processor.push_to_hub(F"ybelkada/{vit_name}")


if __name__ == "__main__":
    A__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--vit_name""",
        default="""vit_base_r50_s16_384""",
        type=str,
        help="""Name of the hybrid ViT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
    )

    A__ = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
49
1
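The conversion script in the last record drives everything off a list of (timm_key, hf_key) pairs. A tiny self-contained illustration of that rename mechanics, using made-up string placeholders in place of real tensors:

# Pop each timm key and re-insert its value under the HuggingFace name,
# exactly what the rename_key helper above does one pair at a time.
state_dict = {"cls_token": "tensor-A", "norm.weight": "tensor-B"}
rename_keys = [
    ("cls_token", "vit.embeddings.cls_token"),
    ("norm.weight", "vit.layernorm.weight"),
]
for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)
print(sorted(state_dict))  # ['vit.embeddings.cls_token', 'vit.layernorm.weight']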