Dataset schema:
  code: string (length 86 – 54.5k)
  code_codestyle: int64 (0 – 371)
  style_context: string (length 87 – 49.2k)
  style_context_codestyle: int64 (0 – 349)
  label: int64 (0 – 1)
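Each row pairs an obfuscated `code` sample with a `style_context` sample, plus per-sample codestyle ids and a binary `label`. A minimal sketch of iterating such rows, assuming the dump was exported as JSON Lines with exactly these column names (the file name "train.jsonl" is a placeholder, not part of the dataset):

# Minimal sketch: read rows with the schema above.
# Assumes a JSON Lines export with these exact column names.
from datasets import load_dataset

rows = load_dataset("json", data_files="train.jsonl", split="train")
for row in rows.select(range(3)):
    print(len(row["code"]), row["code_codestyle"], row["label"])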
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
244
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
0
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __A : int = logging.get_logger(__name__) __A : Optional[int] = Dict[str, Any] __A : Tuple = List[Prediction] @add_end_docstrings(_A ) class __UpperCamelCase ( _A ): def __init__(self : int , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Any): super().__init__(*__A , **__A) if self.framework == "tf": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") requires_backends(self , "vision") self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())) def SCREAMING_SNAKE_CASE__ (self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Optional[int]): A = {} if "threshold" in kwargs: A = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__(self : Any , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): return super().__call__(*__A , **__A) def SCREAMING_SNAKE_CASE__ (self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str): A = load_image(__A) A = torch.IntTensor([[image.height, image.width]]) A = self.image_processor(images=[image] , return_tensors="pt") if self.tokenizer is not None: A = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt") A = target_size return inputs def SCREAMING_SNAKE_CASE__ (self : str , __SCREAMING_SNAKE_CASE : Any): A = model_inputs.pop("target_size") A = self.model(**__A) A = outputs.__class__({"target_size": target_size, **outputs}) if self.tokenizer is not None: A = model_inputs["bbox"] return model_outputs def SCREAMING_SNAKE_CASE__ (self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict=0.9): A = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. A , A = target_size[0].tolist() def unnormalize(__SCREAMING_SNAKE_CASE : str): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1_0_0_0), (height * bbox[1] / 1_0_0_0), (width * bbox[2] / 1_0_0_0), (height * bbox[3] / 1_0_0_0), ])) A , A = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1) A = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A = [unnormalize(__A) for bbox in model_outputs["bbox"].squeeze(0)] A = ["score", "label", "box"] A = [dict(zip(__A , __A)) for vals in zip(scores.tolist() , __A , __A) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A = self.image_processor.post_process_object_detection(__A , __A , __A) A = raw_annotations[0] A = raw_annotation["scores"] A = raw_annotation["labels"] A = raw_annotation["boxes"] A = scores.tolist() A = [self.model.config.idalabel[label.item()] for label in labels] A = [self._get_bounding_box(__A) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
A = ["score", "label", "box"] A = [ dict(zip(__A , __A)) for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"]) ] return annotation def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : "torch.Tensor"): if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.") A , A , A , A = box.int().tolist() A = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
358
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __A : Any = imread(R'digital_image_processing/image_data/lena_small.jpg') __A : Tuple = cvtColor(img, COLOR_BGR2GRAY) def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = cn.convert_to_negative(lowercase__ ) # assert negative_img array for at least one True assert negative_img.any() def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase__ , 110 ) ).startswith( "<PIL.Image.Image image mode=RGB size=100x100 at" ) def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = imread("digital_image_processing/image_data/lena_small.jpg" , 0 ) # assert ambiguous array for all == True assert canny_img.all() A = canny.canny(lowercase__ ) # assert canny array for at least one True assert canny_array.any() def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all() def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" # laplace diagonals A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) A = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ ) assert res.any() def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" assert med.median_filter(lowercase__ , 3 ).any() def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A , A = sob.sobel_filter(lowercase__ ) assert grad.any() and theta.any() def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = sp.make_sepia(lowercase__ , 20 ) assert sepia.all() def __SCREAMING_SNAKE_CASE ( lowercase__ = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" A = bs.Burkes(imread(lowercase__ , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def __SCREAMING_SNAKE_CASE ( lowercase__ = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" A = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to grayscale. 
A = imread(lowercase__ , 0 ) # Test for get_neighbors_pixel function() return not None A = 0 A = 0 A = image[x_coordinate][y_coordinate] A = lbp.get_neighbors_pixel( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image A = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): A = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ ) assert lbp_image.any()
57
0
def solution(limit: int = 28123) -> int:
    # Sum of proper divisors for every n <= limit (1 is a divisor of everything).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
281
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
281
1
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )

        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
101
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
101
1
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
321
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
321
1
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark.run()


if __name__ == "__main__":
    main()
275
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
275
1
from collections.abc import Callable


class Heap:
    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if exists else None"""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child-index of given index if exists else None"""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child-index of given index if exists else None"""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swapping two elements in the heap"""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison"""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns index of valid parent as per desired ordering among given index and both its children"""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in upward direction of given index"""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in downward direction of given index"""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates given item value in heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes given item from heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts given item with given value in heap"""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns top item tuple (Calculated value, item) from heap if present"""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns top item tuple (Calculated value, item) from heap and removes it as well if present"""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Placeholder for the doctest-based test of the Heap class."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
# Inference timestep schedules for the DeepFloyd IF pipelines
# (fast/smart/super variants with 27, 40, 50, 100 and 185 steps).
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]

smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]

smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]

smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]

smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]

super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]

super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]

super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
75
1
"""simple docstring""" from maths.prime_check import is_prime def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowercase_ = F'''Input value of [number={number}] must be an integer''' raise TypeError(__lowerCAmelCase ) if is_prime(__lowerCAmelCase ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
352
"""simple docstring""" import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase : List[Any] = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } UpperCAmelCase : Union[str, Any] = { "allenai/led-base-16384": 1_6384, } class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = LEDTokenizer lowercase__ = ["input_ids", "attention_mask"] def __init__( self : Dict , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]="replace" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[Any]="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[str]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , ) lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""")) lowercase_ = add_prefix_space lowercase_ = pre_tok_class(**lowerCAmelCase_) lowercase_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase_ = """post_processor""" lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) if tokenizer_component_instance: lowercase_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ = tuple(state["""sep"""]) if "cls" in state: lowercase_ = tuple(state["""cls"""]) lowercase_ = False if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = add_prefix_space lowercase_ = True if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets: lowercase_ = trim_offsets lowercase_ = True if changes_to_apply: lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type""")) lowercase_ = component_class(**lowerCAmelCase_) setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCAmelCase ( self : List[str]): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""") return None return str(self._mask_token) @mask_token.setter def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str): """simple docstring""" lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else value lowercase_ = value def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : List[Any]): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None): """simple docstring""" lowercase_ = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_) return tuple(lowerCAmelCase_) def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None): """simple docstring""" lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None): """simple docstring""" lowercase_ = [self.sep_token_id] lowercase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ): """simple docstring""" lowercase_ = super()._pad( encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) # Load from model defaults if return_attention_mask is None: lowercase_ = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase_ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(lowerCAmelCase_) if needs_to_be_padded: lowercase_ = len(lowerCAmelCase_) - len(encoded_inputs["""global_attention_mask"""]) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase_ = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": lowercase_ = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side)) return encoded_inputs
313
0
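The `_pad` override in the tokenizer sample above is easiest to check in isolation. Below is a minimal, self-contained sketch of just the `global_attention_mask` padding rule; the helper name and the plain-list signature are ours for illustration, not the library's:

def pad_global_attention_mask(mask, target_len, padding_side="right"):
    # Hypothetical standalone helper, for illustration only.
    difference = target_len - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        # `-1` marks padding; `0` already means `local attention`.
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy: " + str(padding_side))


# e.g. pad_global_attention_mask([1, 0, 0], 5) -> [1, 0, 0, -1, -1]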
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) a ={ """sample_size""": 32, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1000, """block_out_channels""": [32, 64], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a ={ """sample_size""": 64, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1000, """block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a ={ """sample_size""": 256, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a ={ """num_train_timesteps""": 40, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } a ={ """num_train_timesteps""": 201, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } a ={ """num_train_timesteps""": 151, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple: __lowerCamelCase : List[str] = checkpoint[F"{old_prefix}.in_layers.0.weight"] __lowerCamelCase : Any = checkpoint[F"{old_prefix}.in_layers.0.bias"] __lowerCamelCase : Optional[int] = checkpoint[F"{old_prefix}.in_layers.2.weight"] __lowerCamelCase : Tuple = checkpoint[F"{old_prefix}.in_layers.2.bias"] __lowerCamelCase : Optional[Any] = checkpoint[F"{old_prefix}.emb_layers.1.weight"] __lowerCamelCase : List[Any] = checkpoint[F"{old_prefix}.emb_layers.1.bias"] __lowerCamelCase : List[Any] = checkpoint[F"{old_prefix}.out_layers.0.weight"] __lowerCamelCase : str = checkpoint[F"{old_prefix}.out_layers.0.bias"] __lowerCamelCase : Optional[Any] = checkpoint[F"{old_prefix}.out_layers.3.weight"] __lowerCamelCase : Optional[int] = checkpoint[F"{old_prefix}.out_layers.3.bias"] if has_skip: __lowerCamelCase : str = checkpoint[F"{old_prefix}.skip_connection.weight"] __lowerCamelCase : Optional[int] = 
checkpoint[F"{old_prefix}.skip_connection.bias"] return new_checkpoint def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> Tuple: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = checkpoint[F"{old_prefix}.qkv.weight"].chunk(3 , dim=0 ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = checkpoint[F"{old_prefix}.qkv.bias"].chunk(3 , dim=0 ) __lowerCamelCase : int = checkpoint[F"{old_prefix}.norm.weight"] __lowerCamelCase : List[Any] = checkpoint[F"{old_prefix}.norm.bias"] __lowerCamelCase : Dict = weight_q.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase : int = bias_q.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase : Optional[Any] = weight_k.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase : Any = bias_k.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase : Tuple = weight_v.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase : Any = bias_v.squeeze(-1 ).squeeze(-1 ) __lowerCamelCase : Union[str, Any] = ( checkpoint[F"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 ) ) __lowerCamelCase : Union[str, Any] = checkpoint[F"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]: __lowerCamelCase : int = torch.load(lowerCamelCase__ , map_location='cpu' ) __lowerCamelCase : Optional[int] = {} __lowerCamelCase : Dict = checkpoint['time_embed.0.weight'] __lowerCamelCase : Optional[Any] = checkpoint['time_embed.0.bias'] __lowerCamelCase : Dict = checkpoint['time_embed.2.weight'] __lowerCamelCase : int = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: __lowerCamelCase : Optional[Any] = checkpoint['label_emb.weight'] __lowerCamelCase : str = checkpoint['input_blocks.0.0.weight'] __lowerCamelCase : List[Any] = checkpoint['input_blocks.0.0.bias'] __lowerCamelCase : Tuple = unet_config['down_block_types'] __lowerCamelCase : Optional[Any] = unet_config['layers_per_block'] __lowerCamelCase : Any = unet_config['attention_head_dim'] __lowerCamelCase : Any = unet_config['block_out_channels'] __lowerCamelCase : Union[str, Any] = 1 __lowerCamelCase : Tuple = channels_list[0] for i, layer_type in enumerate(lowerCamelCase__ ): __lowerCamelCase : str = channels_list[i] __lowerCamelCase : List[str] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(lowerCamelCase__ ): __lowerCamelCase : List[Any] = F"down_blocks.{i}.resnets.{j}" __lowerCamelCase : int = F"input_blocks.{current_layer}.0" __lowerCamelCase : int = True if j == 0 and downsample_block_has_skip else False __lowerCamelCase : List[Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(lowerCamelCase__ ): __lowerCamelCase : Union[str, Any] = F"down_blocks.{i}.resnets.{j}" __lowerCamelCase : Optional[int] = F"input_blocks.{current_layer}.0" __lowerCamelCase : Optional[Any] = True if j == 0 and downsample_block_has_skip else False __lowerCamelCase : List[Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) __lowerCamelCase : Any = F"down_blocks.{i}.attentions.{j}" __lowerCamelCase : Union[str, Any] = F"input_blocks.{current_layer}.1" __lowerCamelCase : List[Any] = convert_attention( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) 
current_layer += 1 if i != len(lowerCamelCase__ ) - 1: __lowerCamelCase : Tuple = F"down_blocks.{i}.downsamplers.0" __lowerCamelCase : Any = F"input_blocks.{current_layer}.0" __lowerCamelCase : Any = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) current_layer += 1 __lowerCamelCase : Union[str, Any] = current_channels # hardcoded the mid-block for now __lowerCamelCase : Optional[Any] = 'mid_block.resnets.0' __lowerCamelCase : Any = 'middle_block.0' __lowerCamelCase : Any = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : str = 'mid_block.attentions.0' __lowerCamelCase : Union[str, Any] = 'middle_block.1' __lowerCamelCase : str = convert_attention(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : Optional[Any] = 'mid_block.resnets.1' __lowerCamelCase : Optional[int] = 'middle_block.2' __lowerCamelCase : Union[str, Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : str = 0 __lowerCamelCase : Union[str, Any] = unet_config['up_block_types'] for i, layer_type in enumerate(lowerCamelCase__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __lowerCamelCase : Optional[int] = F"up_blocks.{i}.resnets.{j}" __lowerCamelCase : str = F"output_blocks.{current_layer}.0" __lowerCamelCase : Union[str, Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) current_layer += 1 if i != len(lowerCamelCase__ ) - 1: __lowerCamelCase : List[str] = F"up_blocks.{i}.upsamplers.0" __lowerCamelCase : str = F"output_blocks.{current_layer-1}.1" __lowerCamelCase : Dict = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __lowerCamelCase : Dict = F"up_blocks.{i}.resnets.{j}" __lowerCamelCase : int = F"output_blocks.{current_layer}.0" __lowerCamelCase : Optional[int] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) __lowerCamelCase : List[str] = F"up_blocks.{i}.attentions.{j}" __lowerCamelCase : Dict = F"output_blocks.{current_layer}.1" __lowerCamelCase : Dict = convert_attention( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) current_layer += 1 if i != len(lowerCamelCase__ ) - 1: __lowerCamelCase : int = F"up_blocks.{i}.upsamplers.0" __lowerCamelCase : str = F"output_blocks.{current_layer-1}.2" __lowerCamelCase : Optional[int] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : Tuple = checkpoint['out.0.weight'] __lowerCamelCase : Dict = checkpoint['out.0.bias'] __lowerCamelCase : Optional[int] = checkpoint['out.2.weight'] __lowerCamelCase : List[str] = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": a =argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") a =parser.parse_args() a =strabool(args.class_cond) a 
=os.path.basename(args.unet_path) print(F"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: a =IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a =LSUN_256_UNET_CONFIG elif "test" in ckpt_name: a =TEST_UNET_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: a =None a =con_pt_to_diffuser(args.unet_path, unet_config) a =UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: a =CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: a =CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a =CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") a =CMStochasticIterativeScheduler(**scheduler_config) a =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
73
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise ValueError("iterations must be defined as integers" ) if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) __lowerCAmelCase = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(_UpperCamelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
57
0
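For reference, calling the FizzBuzz helper from the sample above (here aliased to a readable name purely for illustration) produces the classic sequence, with a trailing space appended after every entry:

fizz_buzz = _lowerCamelCase  # hypothetical alias for the mangled name above

print(fizz_buzz(1, 15))
# -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "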
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase: str = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Dict = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) _lowercase : List[str] = MaskFormerConfig(backbone_config=__UpperCAmelCase ) _lowercase : str = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok _lowercase : str = 847 _lowercase : str = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok _lowercase : Dict = 150 _lowercase : Union[str, Any] = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok _lowercase : Dict = 171 _lowercase : str = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO _lowercase : str = 133 _lowercase : Union[str, Any] = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok _lowercase : Dict = 19 _lowercase : Any = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok _lowercase : str = 65 _lowercase : List[str] = """mapillary-vistas-id2label.json""" _lowercase : Dict = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) ) _lowercase : Optional[int] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()} return config def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Dict = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) 
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", 
"""model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") ) # cross-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") ) # MLP 1 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") ) # MLP 2 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") ) # layernorm 1 (self-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") ) # layernorm 3 (final layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") ) # fmt: on return rename_keys def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Any = dct.pop(__UpperCAmelCase ) _lowercase : Any = val def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowercase : int = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowercase : List[str] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" ) _lowercase : int = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _lowercase : Optional[int] = in_proj_weight[:dim, :] _lowercase : Optional[Any] = in_proj_bias[: dim] _lowercase : Tuple = in_proj_weight[ dim : dim * 2, : ] _lowercase : List[str] = in_proj_bias[ dim : dim * 2 ] _lowercase : List[Any] = in_proj_weight[ -dim :, : ] _lowercase : int = in_proj_bias[-dim :] # fmt: on def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): # fmt: off _lowercase : List[Any] = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowercase : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" ) _lowercase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _lowercase : Dict = in_proj_weight[: hidden_size, :] _lowercase : Optional[Any] = in_proj_bias[:config.hidden_size] _lowercase : Optional[int] = in_proj_weight[hidden_size : hidden_size * 2, :] _lowercase : Union[str, Any] = in_proj_bias[hidden_size : hidden_size * 2] _lowercase : Any = in_proj_weight[-hidden_size :, :] _lowercase : str = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowercase : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" ) _lowercase : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _lowercase : str = in_proj_weight[: hidden_size, :] _lowercase : str = in_proj_bias[:config.hidden_size] _lowercase : Any = in_proj_weight[hidden_size : hidden_size * 2, :] _lowercase : str = in_proj_bias[hidden_size : hidden_size * 2] _lowercase : Union[str, Any] = in_proj_weight[-hidden_size :, :] _lowercase : int = in_proj_bias[-hidden_size :] # fmt: on def __SCREAMING_SNAKE_CASE ( ): _lowercase : Dict = 
"""http://images.cocodataset.org/val2017/000000039769.jpg""" _lowercase : Union[str, Any] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ) return im @torch.no_grad() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ): _lowercase : Union[str, Any] = get_maskformer_config(__UpperCAmelCase ) # load original state_dict with open(__UpperCAmelCase , """rb""" ) as f: _lowercase : Any = pickle.load(__UpperCAmelCase ) _lowercase : str = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys _lowercase : Optional[Any] = create_rename_keys(__UpperCAmelCase ) for src, dest in rename_keys: rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) read_in_swin_q_k_v(__UpperCAmelCase , config.backbone_config ) read_in_decoder_q_k_v(__UpperCAmelCase , __UpperCAmelCase ) # update to torch tensors for key, value in state_dict.items(): _lowercase : Union[str, Any] = torch.from_numpy(__UpperCAmelCase ) # load 🤗 model _lowercase : str = MaskFormerForInstanceSegmentation(__UpperCAmelCase ) model.eval() for name, param in model.named_parameters(): print(__UpperCAmelCase , param.shape ) _lowercase : List[str] = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(__UpperCAmelCase ) == 0, F"""Unexpected keys: {unexpected_keys}""" # verify results _lowercase : Tuple = prepare_img() if "vistas" in model_name: _lowercase : int = 65 elif "cityscapes" in model_name: _lowercase : List[str] = 65535 else: _lowercase : Any = 255 _lowercase : List[str] = True if """ade""" in model_name else False _lowercase : int = MaskFormerImageProcessor(ignore_index=__UpperCAmelCase , reduce_labels=__UpperCAmelCase ) _lowercase : Optional[int] = image_processor(__UpperCAmelCase , return_tensors="""pt""" ) _lowercase : Union[str, Any] = model(**__UpperCAmelCase ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": _lowercase : Union[str, Any] = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" ) Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) model.save_pretrained(__UpperCAmelCase ) image_processor.save_pretrained(__UpperCAmelCase ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F"""nielsr/{model_name}""" ) image_processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": UpperCAmelCase: str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", 
action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase: Optional[int] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
364
"""simple docstring""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL UpperCAmelCase: List[Any] = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ): _lowercase : Union[str, Any] = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowercase : str = math.floor(val / multiple ) * multiple if x < min_val: _lowercase : Dict = math.ceil(val / multiple ) * multiple return x _lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size _lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase ) _lowercase , _lowercase : Union[str, Any] = output_size # determine new height and width _lowercase : str = output_height / input_height _lowercase : List[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowercase : str = scale_width else: # fit height _lowercase : int = scale_height _lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase ) _lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase ) return (new_height, new_width) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"] def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84} _lowercase : str = get_size_dict(UpperCAmelCase_ ) _lowercase : Tuple = do_resize _lowercase : Any = size _lowercase : List[Any] = keep_aspect_ratio _lowercase : Any = ensure_multiple_of _lowercase : str = resample _lowercase : Optional[Any] = do_rescale _lowercase : List[Any] = rescale_factor _lowercase : Union[str, Any] = do_normalize _lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): _lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _lowercase : Dict = get_resize_output_image_size( UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,) return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,): _lowercase : Any = do_resize if do_resize is not None else self.do_resize _lowercase : List[str] = size if size is not None else self.size _lowercase : int = get_size_dict(UpperCAmelCase_ ) _lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowercase : List[str] = resample if resample is not None else self.resample _lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : str = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean _lowercase : int = image_std if image_std is not None else self.image_std _lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ ) if not valid_images(UpperCAmelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images] if do_resize: _lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images] if do_rescale: _lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images] if do_normalize: _lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images] _lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images] _lowercase : int = {"""pixel_values""": images} return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ): _lowercase : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(UpperCAmelCase_ ): _lowercase : Tuple = target_sizes.numpy() _lowercase : Optional[Any] = [] for idx in range(len(UpperCAmelCase_ ) ): _lowercase : Dict = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ ) _lowercase : Optional[int] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase_ ) else: _lowercase : Union[str, Any] = logits.argmax(dim=1 ) _lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
336
0
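The size logic in the DPT image-processor sample above hinges on its inner `constraint_to_multiple_of` closure. Below is a standalone rendering as a sketch, not the library code; the parameter names are recovered from the body's own references:

import math


def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    # Round the scaled dimension to the nearest multiple...
    x = round(val / multiple) * multiple
    # ...but floor if that overshoots the allowed maximum...
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    # ...and ceil if it undershoots the minimum.
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


# e.g. snapping a 518px edge to a multiple of 32:
# constraint_to_multiple_of(518, 32) -> 512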
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a , start , end ):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count


def _in_place_partition(a , start , end ):
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
101
from __future__ import annotations

ELECTRON_CHARGE = 1.60_21E-19  # units = C


def UpperCamelCase ( conductivity , electron_conc , mobility , ):
    '''simple docstring'''
    if (conductivity, electron_conc, mobility).count(0 ) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''' )
    elif conductivity < 0:
        raise ValueError('''Conductivity cannot be negative''' )
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative''' )
    elif mobility < 0:
        raise ValueError('''Mobility cannot be negative''' )
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
101
1
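The solver in the second sample above encodes sigma = n * q * mu: given any two of conductivity, electron concentration, and mobility (with the unknown passed as 0), it returns the third. A worked call, with the two input values assumed purely for illustration:

ELECTRON_CHARGE = 1.60_21E-19  # units = C

electron_conc = 1e20  # m^-3, assumed value for illustration
mobility = 0.14       # m^2 V^-1 s^-1, assumed value for illustration

conductivity = mobility * electron_conc * ELECTRON_CHARGE
print(conductivity)  # ~2.24 S/m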
"""simple docstring""" import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def __SCREAMING_SNAKE_CASE ( A_ ): random.seed(A_ ) np.random.seed(A_ ) torch.manual_seed(A_ ) torch.cuda.manual_seed_all(A_ ) # ^^ safe to call this function even if cuda is not available class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[int] ,lowercase_ : Iterable[torch.nn.Parameter] ,lowercase_ : float = 0.9999 ,lowercase_ : float = 0.0 ,lowercase_ : int = 0 ,lowercase_ : bool = False ,lowercase_ : Union[float, int] = 1.0 ,lowercase_ : Union[float, int] = 2 / 3 ,lowercase_ : Optional[Any] = None ,lowercase_ : Dict[str, Any] = None ,**lowercase_ : List[str] ,): if isinstance(lowercase_ ,torch.nn.Module ): lowerCAmelCase__ : str = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' ,'''1.0.0''' ,lowercase_ ,standard_warn=lowercase_ ,) lowerCAmelCase__ : Any = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility lowerCAmelCase__ : Union[str, Any] = True if kwargs.get('''max_value''' ,lowercase_ ) is not None: lowerCAmelCase__ : Tuple = '''The `max_value` argument is deprecated. Please use `decay` instead.''' deprecate('''max_value''' ,'''1.0.0''' ,lowercase_ ,standard_warn=lowercase_ ) lowerCAmelCase__ : List[str] = kwargs['''max_value'''] if kwargs.get('''min_value''' ,lowercase_ ) is not None: lowerCAmelCase__ : List[Any] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.''' deprecate('''min_value''' ,'''1.0.0''' ,lowercase_ ,standard_warn=lowercase_ ) lowerCAmelCase__ : int = kwargs['''min_value'''] lowerCAmelCase__ : str = list(lowercase_ ) lowerCAmelCase__ : Union[str, Any] = [p.clone().detach() for p in parameters] if kwargs.get('''device''' ,lowercase_ ) is not None: lowerCAmelCase__ : Any = '''The `device` argument is deprecated. 
Please use `to` instead.''' deprecate('''device''' ,'''1.0.0''' ,lowercase_ ,standard_warn=lowercase_ ) self.to(device=kwargs['''device'''] ) lowerCAmelCase__ : int = None lowerCAmelCase__ : Union[str, Any] = decay lowerCAmelCase__ : Tuple = min_decay lowerCAmelCase__ : Tuple = update_after_step lowerCAmelCase__ : Union[str, Any] = use_ema_warmup lowerCAmelCase__ : Union[str, Any] = inv_gamma lowerCAmelCase__ : str = power lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ : List[Any] = None # set in `step()` lowerCAmelCase__ : Union[str, Any] = model_cls lowerCAmelCase__ : Tuple = model_config @classmethod def __lowerCAmelCase ( cls : Optional[int] ,lowercase_ : Optional[int] ,lowercase_ : Tuple ): lowerCAmelCase__ : str = model_cls.load_config(lowercase_ ,return_unused_kwargs=lowercase_ ) lowerCAmelCase__ : List[Any] = model_cls.from_pretrained(lowercase_ ) lowerCAmelCase__ : Tuple = cls(model.parameters() ,model_cls=lowercase_ ,model_config=model.config ) ema_model.load_state_dict(lowercase_ ) return ema_model def __lowerCAmelCase ( self : Dict ,lowercase_ : Tuple ): if self.model_cls is None: raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' ) if self.model_config is None: raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' ) lowerCAmelCase__ : List[str] = self.model_cls.from_config(self.model_config ) lowerCAmelCase__ : Optional[int] = self.state_dict() state_dict.pop('''shadow_params''' ,lowercase_ ) model.register_to_config(**lowercase_ ) self.copy_to(model.parameters() ) model.save_pretrained(lowercase_ ) def __lowerCAmelCase ( self : List[Any] ,lowercase_ : int ): lowerCAmelCase__ : Any = max(0 ,optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: lowerCAmelCase__ : Dict = 1 - (1 + step / self.inv_gamma) ** -self.power else: lowerCAmelCase__ : Optional[int] = (1 + step) / (1_0 + step) lowerCAmelCase__ : List[str] = min(lowercase_ ,self.decay ) # make sure decay is not smaller than min_decay lowerCAmelCase__ : Any = max(lowercase_ ,self.min_decay ) return cur_decay_value @torch.no_grad() def __lowerCAmelCase ( self : str ,lowercase_ : Iterable[torch.nn.Parameter] ): if isinstance(lowercase_ ,torch.nn.Module ): lowerCAmelCase__ : List[str] = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' ,'''1.0.0''' ,lowercase_ ,standard_warn=lowercase_ ,) lowerCAmelCase__ : Union[str, Any] = parameters.parameters() lowerCAmelCase__ : List[str] = list(lowercase_ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
lowerCAmelCase__ : Union[str, Any] = self.get_decay(self.optimization_step ) lowerCAmelCase__ : Tuple = decay lowerCAmelCase__ : List[Any] = 1 - decay lowerCAmelCase__ : List[str] = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params ,lowercase_ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): lowerCAmelCase__ : Optional[int] = deepspeed.zero.GatheredParameters(lowercase_ ,modifier_rank=lowercase_ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(lowercase_ ) def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Iterable[torch.nn.Parameter] ): lowerCAmelCase__ : Any = list(lowercase_ ) for s_param, param in zip(self.shadow_params ,lowercase_ ): param.data.copy_(s_param.to(param.device ).data ) def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : int=None ,lowercase_ : List[str]=None ): lowerCAmelCase__ : Optional[int] = [ p.to(device=lowercase_ ,dtype=lowercase_ ) if p.is_floating_point() else p.to(device=lowercase_ ) for p in self.shadow_params ] def __lowerCAmelCase ( self : Optional[Any] ): return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Iterable[torch.nn.Parameter] ): lowerCAmelCase__ : Optional[Any] = [param.detach().cpu().clone() for param in parameters] def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Iterable[torch.nn.Parameter] ): if self.temp_stored_params is None: raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' ) for c_param, param in zip(self.temp_stored_params ,lowercase_ ): param.data.copy_(c_param.data ) # Better memory-wise. 
lowerCAmelCase__ : Any = None def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : dict ): lowerCAmelCase__ : List[str] = copy.deepcopy(lowercase_ ) lowerCAmelCase__ : str = state_dict.get('''decay''' ,self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('''Decay must be between 0 and 1''' ) lowerCAmelCase__ : Dict = state_dict.get('''min_decay''' ,self.min_decay ) if not isinstance(self.min_decay ,lowercase_ ): raise ValueError('''Invalid min_decay''' ) lowerCAmelCase__ : str = state_dict.get('''optimization_step''' ,self.optimization_step ) if not isinstance(self.optimization_step ,lowercase_ ): raise ValueError('''Invalid optimization_step''' ) lowerCAmelCase__ : List[str] = state_dict.get('''update_after_step''' ,self.update_after_step ) if not isinstance(self.update_after_step ,lowercase_ ): raise ValueError('''Invalid update_after_step''' ) lowerCAmelCase__ : Optional[int] = state_dict.get('''use_ema_warmup''' ,self.use_ema_warmup ) if not isinstance(self.use_ema_warmup ,lowercase_ ): raise ValueError('''Invalid use_ema_warmup''' ) lowerCAmelCase__ : Any = state_dict.get('''inv_gamma''' ,self.inv_gamma ) if not isinstance(self.inv_gamma ,(float, int) ): raise ValueError('''Invalid inv_gamma''' ) lowerCAmelCase__ : int = state_dict.get('''power''' ,self.power ) if not isinstance(self.power ,(float, int) ): raise ValueError('''Invalid power''' ) lowerCAmelCase__ : Optional[Any] = state_dict.get('''shadow_params''' ,lowercase_ ) if shadow_params is not None: lowerCAmelCase__ : List[str] = shadow_params if not isinstance(self.shadow_params ,lowercase_ ): raise ValueError('''shadow_params must be a list''' ) if not all(isinstance(lowercase_ ,torch.Tensor ) for p in self.shadow_params ): raise ValueError('''shadow_params must all be Tensors''' )
366
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": __UpperCamelCase : int = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') __UpperCamelCase : Dict = F'''https://www.google.com/search?q={query}&num=100''' __UpperCamelCase : Tuple = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: __UpperCamelCase : Tuple = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: __UpperCamelCase : Optional[Any] = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
74
0
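The EMA sample two rows up selects its decay with `get_decay`: no averaging until `update_after_step` passes, then either a `1 - (1 + step / inv_gamma) ** -power` warmup or the `(1 + step) / (10 + step)` ramp, clamped between `min_decay` and `decay`. A standalone sketch of that schedule (defaults copied from the class's `__init__`; the function name is ours):

def ema_decay(optimization_step, decay=0.9999, min_decay=0.0, update_after_step=0,
              use_ema_warmup=False, inv_gamma=1.0, power=2 / 3):
    # Steps at or before `update_after_step` contribute nothing.
    step = max(0, optimization_step - update_after_step - 1)
    if step <= 0:
        return 0.0
    if use_ema_warmup:
        cur_decay_value = 1 - (1 + step / inv_gamma) ** -power
    else:
        cur_decay_value = (1 + step) / (10 + step)
    # Clamp into [min_decay, decay].
    return max(min(cur_decay_value, decay), min_decay)


# e.g. ema_decay(100) == (1 + 99) / (10 + 99) ≈ 0.9174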
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Tuple = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() __lowerCAmelCase : Tuple = dict(zip(A_ , range(len(A_ ) ) ) ) __lowerCAmelCase : str = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } __lowerCAmelCase : Dict = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_6000, '''return_attention_mask''': False, '''do_normalize''': True, } __lowerCAmelCase : int = tempfile.mkdtemp() __lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , A_ ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A_ ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A_ ) + '''\n''' ) # load decoder from hub __lowerCAmelCase : Dict = '''hf-internal-testing/ngram-beam-search-decoder''' def UpperCamelCase__ ( self , **A_ ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : List[str] = self.add_kwargs_tokens_map.copy() kwargs.update(A_ ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ ) def UpperCamelCase__ ( self , **A_ ) ->Tuple: '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ ) def UpperCamelCase__ ( self , **A_ ) ->str: '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : List[Any] = self.get_feature_extractor() __lowerCAmelCase : List[str] = self.get_decoder() __lowerCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , A_ ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , A_ 
) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , A_ ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : Any = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __lowerCAmelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : List[str] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(A_ , '''include''' ): WavaVecaProcessorWithLM( tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = self.get_feature_extractor() __lowerCAmelCase : List[Any] = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_decoder() __lowerCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ ) __lowerCAmelCase : int = floats_list((3, 1000) ) __lowerCAmelCase : Tuple = feature_extractor(A_ , return_tensors='''np''' ) __lowerCAmelCase : str = processor(A_ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : Dict = self.get_feature_extractor() __lowerCAmelCase : Dict = self.get_tokenizer() __lowerCAmelCase : List[str] = self.get_decoder() __lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ ) __lowerCAmelCase : Dict = '''This is a test string''' __lowerCAmelCase : Dict = processor(text=A_ ) __lowerCAmelCase : List[Any] = tokenizer(A_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase__ ( self , A_=(2, 10, 16) , A_=77 ) ->Tuple: '''simple docstring''' np.random.seed(A_ ) return np.random.rand(*A_ ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = self.get_feature_extractor() __lowerCAmelCase : Optional[int] = self.get_tokenizer() __lowerCAmelCase : List[str] = self.get_decoder() __lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ ) __lowerCAmelCase : int = self._get_dummy_logits(shape=(10, 16) , seed=13 ) __lowerCAmelCase : Any = processor.decode(A_ ) __lowerCAmelCase : str = decoder.decode_beams(A_ )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], 
['''fork'''], ['''spawn''']] ) def UpperCamelCase__ ( self , A_ ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : int = self.get_feature_extractor() __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : Any = self.get_decoder() __lowerCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ ) __lowerCAmelCase : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: __lowerCAmelCase : Union[str, Any] = processor.batch_decode(A_ ) else: with get_context(A_ ).Pool() as pool: __lowerCAmelCase : Optional[int] = processor.batch_decode(A_ , A_ ) __lowerCAmelCase : List[str] = list(A_ ) with get_context('''fork''' ).Pool() as p: __lowerCAmelCase : str = decoder.decode_beams_batch(A_ , A_ ) __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Dict = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(A_ , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(A_ , decoded_processor.logit_score ) self.assertListEqual(A_ , decoded_processor.lm_score ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : Dict = self.get_feature_extractor() __lowerCAmelCase : List[Any] = self.get_tokenizer() __lowerCAmelCase : List[str] = self.get_decoder() __lowerCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ ) __lowerCAmelCase : List[Any] = self._get_dummy_logits() __lowerCAmelCase : Tuple = 15 __lowerCAmelCase : Any = -20.0 __lowerCAmelCase : List[str] = -4.0 __lowerCAmelCase : Optional[int] = processor.batch_decode( A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , ) __lowerCAmelCase : Union[str, Any] = decoded_processor_out.text __lowerCAmelCase : Union[str, Any] = list(A_ ) with get_context('''fork''' ).Pool() as pool: __lowerCAmelCase : List[str] = decoder.decode_beams_batch( A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , ) __lowerCAmelCase : Any = [d[0][0] for d in decoded_decoder_out] __lowerCAmelCase : Optional[Any] = [d[0][2] for d in decoded_decoder_out] __lowerCAmelCase : List[str] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(A_ , A_ ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ ) self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1e-3 ) ) self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , A_ , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : int = self.get_feature_extractor() __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : List[str] = self.get_decoder() __lowerCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ ) __lowerCAmelCase : List[str] = self._get_dummy_logits() __lowerCAmelCase : str = 2.0 __lowerCAmelCase : int = 5.0 __lowerCAmelCase : Any = -20.0 __lowerCAmelCase : Union[str, Any] = True __lowerCAmelCase : Optional[int] = processor.batch_decode( A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , 
lm_score_boundary=A_ , ) __lowerCAmelCase : Dict = decoded_processor_out.text __lowerCAmelCase : Optional[int] = list(A_ ) decoder.reset_params( alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , ) with get_context('''fork''' ).Pool() as pool: __lowerCAmelCase : int = decoder.decode_beams_batch( A_ , A_ , ) __lowerCAmelCase : int = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(A_ , A_ ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ ) __lowerCAmelCase : List[Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , A_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] __lowerCAmelCase : int = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __lowerCAmelCase : Union[str, Any] = os.listdir(A_ ) __lowerCAmelCase : Dict = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : str = snapshot_download('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(A_ ) __lowerCAmelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key] __lowerCAmelCase : List[Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __lowerCAmelCase : List[str] = os.listdir(A_ ) __lowerCAmelCase : Union[str, Any] = os.listdir(A_ ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : str = floats_list((3, 1000) ) __lowerCAmelCase : int = processor_wavaveca(A_ , return_tensors='''np''' ) __lowerCAmelCase : int = processor_auto(A_ , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) __lowerCAmelCase : Optional[Any] = self._get_dummy_logits() __lowerCAmelCase : int = processor_wavaveca.batch_decode(A_ ) __lowerCAmelCase : Tuple = processor_auto.batch_decode(A_ ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : int = self.get_feature_extractor() __lowerCAmelCase : Optional[int] = self.get_tokenizer() __lowerCAmelCase : List[Any] = self.get_decoder() __lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , 
msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def UpperCamelCase__ ( A_ , A_ ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : List[str] = [d[key] for d in offsets] return retrieved_list def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : Optional[Any] = self._get_dummy_logits()[0] __lowerCAmelCase : Any = processor.decode(A_ , output_word_offsets=A_ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(A_ , A_ ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __lowerCAmelCase : Any = self._get_dummy_logits() __lowerCAmelCase : str = processor.batch_decode(A_ , output_word_offsets=A_ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(A_ , A_ ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' import torch __lowerCAmelCase : int = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ ) __lowerCAmelCase : Union[str, Any] = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) ) __lowerCAmelCase : Optional[int] = iter(A_ ) __lowerCAmelCase : int = next(A_ ) __lowerCAmelCase : str = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) __lowerCAmelCase : str = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __lowerCAmelCase : Tuple = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): __lowerCAmelCase : Any = model(A_ ).logits.cpu().numpy() __lowerCAmelCase : Dict = processor.decode(logits[0] , output_word_offsets=A_ ) __lowerCAmelCase : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __lowerCAmelCase : Union[str, Any] = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], 
} for d in output['''word_offsets'''] ] __lowerCAmelCase : Union[str, Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ ) self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text ) # output times __lowerCAmelCase : Tuple = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) ) __lowerCAmelCase : Tuple = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) ) # fmt: off __lowerCAmelCase : Tuple = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) __lowerCAmelCase : Dict = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) ) self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
275
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class __lowercase (unittest.TestCase ):
    @property
    def UpperCamelCase__ ( self ) ->Tuple:
        '''simple docstring'''
        torch.manual_seed(0 )
        __lowerCAmelCase : List[Any] = UNetaDModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') ,
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') ,
        )
        return model

    def UpperCamelCase__ ( self ) ->int:
        '''simple docstring'''
        __lowerCAmelCase : List[str] = self.dummy_uncond_unet
        __lowerCAmelCase : Any = PNDMScheduler()
        __lowerCAmelCase : Dict = PNDMPipeline(unet=A_ , scheduler=A_ )
        pndm.to(A_ )
        pndm.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        __lowerCAmelCase : Any = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' ).images
        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        __lowerCAmelCase : List[Any] = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' , return_dict=A_ )[0]
        __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch
class __lowercase (unittest.TestCase ):
    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''simple docstring'''
        __lowerCAmelCase : Optional[int] = '''google/ddpm-cifar10-32'''
        __lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(A_ )
        __lowerCAmelCase : int = PNDMScheduler()
        __lowerCAmelCase : Any = PNDMPipeline(unet=A_ , scheduler=A_ )
        pndm.to(A_ )
        pndm.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Tuple = torch.manual_seed(0 )
        __lowerCAmelCase : Any = pndm(generator=A_ , output_type='''numpy''' ).images
        __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
275
1
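The pooled decoding pattern exercised in the tests above can be used directly; a minimal sketch, assuming the `hf-internal-testing/processor_with_lm` checkpoint referenced in those tests is reachable and using random stand-in logits:

# Minimal pooled LM-decoding sketch; logits are dummy values with the same
# (batch, time, vocab) shape the tests use, not real acoustic model output.
import numpy as np
from multiprocessing import get_context

from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
logits = np.random.rand(2, 10, 16)

# The pool must be created *after* the processor so the forked workers
# inherit the loaded language model, as the test comments above note.
with get_context("fork").Pool() as pool:
    decoded = processor.batch_decode(logits, pool)
print(decoded.text)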
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


snake_case : Optional[int] = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case : Tuple = [
        'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
        'UniSpeechForCTC',
        'UniSpeechForPreTraining',
        'UniSpeechForSequenceClassification',
        'UniSpeechModel',
        'UniSpeechPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    snake_case : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
364
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class snake_case_ (unittest.TestCase ): UpperCAmelCase__ : Optional[Any] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase__ : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) UpperCAmelCase__ : Optional[int] = ['''accelerate''', '''launch'''] UpperCAmelCase__ : List[str] = Path.home() / '''.cache/huggingface/accelerate''' UpperCAmelCase__ : Union[str, Any] = '''default_config.yaml''' UpperCAmelCase__ : List[Any] = config_folder / config_file UpperCAmelCase__ : Optional[int] = config_folder / '''_default_config.yaml''' UpperCAmelCase__ : int = Path('''tests/test_configs''' ) @classmethod def lowerCamelCase__( cls :Any ) -> int: if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def lowerCamelCase__( cls :Dict ) -> Union[str, Any]: if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def lowerCamelCase__( self :Any ) -> Optional[int]: a__ = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy() ) def lowerCamelCase__( self :str ) -> List[Any]: for config in sorted(self.test_config_path.glob('**/*.yaml' ) ): with self.subTest(config_file=__snake_case ): execute_subprocess_async( self.base_cmd + ['--config_file', str(__snake_case ), self.test_file_path] ,env=os.environ.copy() ) def lowerCamelCase__( self :int ) -> List[str]: execute_subprocess_async(['accelerate', 'test'] ,env=os.environ.copy() ) class snake_case_ (unittest.TestCase ): UpperCAmelCase__ : List[Any] = '''test-tpu''' UpperCAmelCase__ : str = '''us-central1-a''' UpperCAmelCase__ : Optional[Any] = '''ls''' UpperCAmelCase__ : Optional[int] = ['''accelerate''', '''tpu-config'''] UpperCAmelCase__ : Any = '''cd /usr/share''' UpperCAmelCase__ : Tuple = '''tests/test_samples/test_command_file.sh''' UpperCAmelCase__ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh''' def lowerCamelCase__( self :Optional[Any] ) -> Tuple: a__ = run_command( self.cmd + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] ,return_stdout=__snake_case ,) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' ,__snake_case ,) def lowerCamelCase__( self :int ) -> str: a__ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] ,return_stdout=__snake_case ,) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' ,__snake_case ,) def lowerCamelCase__( self :Any ) -> Dict: a__ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] ,return_stdout=__snake_case ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' ,__snake_case ,) def lowerCamelCase__( self :Tuple ) -> str: a__ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] ,return_stdout=__snake_case ,) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker 
all' ,__snake_case ,) def lowerCamelCase__( self :Dict ) -> int: a__ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--command', 'echo "Hello World"', '--debug', ] ,return_stdout=__snake_case ,) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' ,__snake_case ,) def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]: a__ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] ,return_stdout=__snake_case ,) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' ,__snake_case ,) def lowerCamelCase__( self :int ) -> int: a__ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command_file', self.command_file, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] ,return_stdout=__snake_case ,) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' ,__snake_case ,) def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]: a__ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] ,return_stdout=__snake_case ,) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' ,__snake_case ,) def lowerCamelCase__( self :Any ) -> Optional[int]: a__ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--accelerate_version', '12.0.0', '--debug', ] ,return_stdout=__snake_case ,) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' ,__snake_case ,)
109
0
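For reference, the import-structure file above follows the standard transformers lazy-module pattern; a condensed sketch of that pattern (the module and symbol names here are illustrative placeholders):

# Condensed lazy-module sketch: heavy submodule imports are deferred until
# the corresponding attribute is first accessed on the package module.
import sys

from transformers.utils import _LazyModule

_import_structure = {"configuration_unispeech": ["UniSpeechConfig"]}
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)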
'''simple docstring'''


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    UpperCAmelCase : Tuple = [1]
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = 0, 0, 0
    UpperCAmelCase : Dict = ugly_nums[ia] * 2
    UpperCAmelCase : Union[str, Any] = ugly_nums[ia] * 3
    UpperCAmelCase : int = ugly_nums[ia] * 5
    for _ in range(1 , __magic_name__ ):
        UpperCAmelCase : Optional[Any] = min(__magic_name__ , __magic_name__ , __magic_name__ )
        ugly_nums.append(__magic_name__ )
        if next_num == next_a:
            ia += 1
            UpperCAmelCase : List[Any] = ugly_nums[ia] * 2
        if next_num == next_a:
            ia += 1
            UpperCAmelCase : List[str] = ugly_nums[ia] * 3
        if next_num == next_a:
            ia += 1
            UpperCAmelCase : Optional[Any] = ugly_nums[ia] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(F'{ugly_numbers(2_00) = }')
311
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase : Tuple = 192 UpperCAmelCase : str = 768 UpperCAmelCase : List[Any] = 12 UpperCAmelCase : List[Any] = 3 UpperCAmelCase : List[Any] = [800, 1333] UpperCAmelCase : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Union[str, Any] = 330 UpperCAmelCase : Union[str, Any] = 14 UpperCAmelCase : Any = 6 UpperCAmelCase : int = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase : Union[str, Any] = 384 UpperCAmelCase : Dict = 1536 UpperCAmelCase : str = 12 UpperCAmelCase : List[str] = 6 elif "yolos_b" in yolos_name: UpperCAmelCase : int = [800, 1344] UpperCAmelCase : Optional[int] = 91 UpperCAmelCase : int = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json" UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : str = idalabel UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def lowercase ( __magic_name__ ): '''simple docstring''' if "backbone" in name: UpperCAmelCase : int = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: 
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "qkv" in key: UpperCAmelCase : str = key.split("." ) UpperCAmelCase : List[Any] = int(key_split[2] ) UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase : Optional[int] = val[:dim, :] UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : Tuple = val[:dim] UpperCAmelCase : List[str] = val[dim : dim * 2] UpperCAmelCase : Any = val[-dim:] else: UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ ) # load original state_dict UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : List[str] = model(**__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) UpperCAmelCase : Tuple = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase : Union[str, Any] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) UpperCAmelCase : List[str] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase : List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) UpperCAmelCase : Dict = torch.tensor( [[0.7_6_1_4, 
0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) UpperCAmelCase : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": UpperCAmelCase : str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: UpperCAmelCase : int = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase : Tuple = model_mapping[yolos_name] image_processor.push_to_hub(__magic_name__ , organization="hustvl" ) model.push_to_hub(__magic_name__ , organization="hustvl" ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
311
1
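A quick check of the ugly-number generator above (assuming the function is importable under its original name `ugly_numbers`): the sequence admits only 2, 3 and 5 as prime factors.

# Sanity check for the dynamic-programming generator above.
expected = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
assert [ugly_numbers(n) for n in range(1, 11)] == expected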
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase_ = logging.get_logger(__name__)

UpperCAmelCase_ = {
    'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
    'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
    'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
    'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
    'funnel-transformer/intermediate': (
        'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
    ),
    'funnel-transformer/intermediate-base': (
        'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
    ),
    'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
    'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
    'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
    'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}


class lowerCamelCase__( __lowerCamelCase):
    UpperCAmelCase__ : Tuple = 'funnel'
    UpperCAmelCase__ : Optional[Any] = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
    }

    def __init__( self: str , UpperCamelCase_: Optional[Any]=3_05_22 , UpperCamelCase_: Optional[Any]=[4, 4, 4] , UpperCamelCase_: List[str]=None , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Optional[Any]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Optional[Any]=64 , UpperCamelCase_: Any=30_72 , UpperCamelCase_: Optional[Any]="gelu_new" , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: Dict=None , UpperCamelCase_: Optional[int]=1E-9 , UpperCamelCase_: Optional[Any]="mean" , UpperCamelCase_: Any="relative_shift" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=True , UpperCamelCase_: Union[str, Any]=True , **UpperCamelCase_: List[str] , ):
        __lowerCamelCase = vocab_size
        __lowerCamelCase = block_sizes
        __lowerCamelCase = [1] * len(UpperCamelCase_ ) if block_repeats is None else block_repeats
        assert len(UpperCamelCase_ ) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        __lowerCamelCase = num_decoder_layers
        __lowerCamelCase = d_model
        __lowerCamelCase = n_head
        __lowerCamelCase = d_head
        __lowerCamelCase = d_inner
        __lowerCamelCase = hidden_act
        __lowerCamelCase = hidden_dropout
        __lowerCamelCase = attention_dropout
        __lowerCamelCase = activation_dropout
        __lowerCamelCase = initializer_range
        __lowerCamelCase = initializer_std
        __lowerCamelCase = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
        __lowerCamelCase = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
        __lowerCamelCase = attention_type
        __lowerCamelCase = separate_cls
        __lowerCamelCase = truncate_seq
        __lowerCamelCase = pool_q_only
        super().__init__(**UpperCamelCase_ )

    @property
    def lowerCAmelCase__ ( self: Tuple ):
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] ):
        raise NotImplementedError(
            """This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."""
        )

    @property
    def lowerCAmelCase__ ( self: Optional[int] ):
        return len(self.block_sizes )

    @num_blocks.setter
    def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Tuple ):
        raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
29
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model') UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'} UpperCAmelCase_ = '>>zh<<' UpperCAmelCase_ = 'Helsinki-NLP/' if is_torch_available(): UpperCAmelCase_ = 'pt' elif is_tf_available(): UpperCAmelCase_ = 'tf' else: UpperCAmelCase_ = 'jax' @require_sentencepiece class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Union[str, Any] = MarianTokenizer UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : int = True def lowerCAmelCase__ ( self: Union[str, Any] ): super().setUp() __lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] __lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowerCamelCase = Path(self.tmpdirname ) save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) __lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ): return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ): return ( "This is a test", "This is a test", ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = """</s>""" __lowerCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase_ ) , 9 ) def lowerCAmelCase__ ( self: Tuple ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' ) __lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] ) __lowerCamelCase = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase_ ) __lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )] self.assertIn("""source.spm""" , UpperCamelCase_ ) MarianTokenizer.from_pretrained(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = tok( ["""I am a small frog""" * 
10_00, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def lowerCAmelCase__ ( self: Optional[int] ): # fmt: off __lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def lowerCAmelCase__ ( self: int ): __lowerCamelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) __lowerCamelCase = """Tämä on testi""" __lowerCamelCase = """This is a test""" __lowerCamelCase = [76, 7, 20_47, 2] __lowerCamelCase = [69, 12, 11, 9_40, 2] __lowerCamelCase = tokenizer(UpperCamelCase_ ).input_ids self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = tokenizer(text_target=UpperCamelCase_ ).input_ids self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
29
1
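A minimal instantiation sketch for the Funnel configuration above; the block sizes are illustrative values, not a released checkpoint.

from transformers import FunnelConfig

# num_hidden_layers is a read-only property equal to sum(block_sizes),
# matching the property defined in the configuration class above.
config = FunnelConfig(block_sizes=[2, 2, 2], n_head=8)
assert config.num_hidden_layers == sum(config.block_sizes)
assert config.num_blocks == 3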
'''simple docstring'''

import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    """simple docstring"""
    _SCREAMING_SNAKE_CASE : Dict = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )


def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    """simple docstring"""
    _SCREAMING_SNAKE_CASE : List[str] = emb.weight.shape
    _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
    _SCREAMING_SNAKE_CASE : str = emb.weight.data
    return lin_layer


def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    """simple docstring"""
    _SCREAMING_SNAKE_CASE : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
    _SCREAMING_SNAKE_CASE : int = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    _SCREAMING_SNAKE_CASE : Dict = mam_aaa['''model''']
    remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
    _SCREAMING_SNAKE_CASE : List[str] = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    _SCREAMING_SNAKE_CASE : Optional[int] = MaMaaaConfig(
        vocab_size=SCREAMING_SNAKE_CASE__ ,
        max_position_embeddings=1024 ,
        encoder_layers=args.encoder_layers ,
        decoder_layers=args.decoder_layers ,
        encoder_attention_heads=args.encoder_attention_heads ,
        decoder_attention_heads=args.decoder_attention_heads ,
        encoder_ffn_dim=args.encoder_ffn_embed_dim ,
        decoder_ffn_dim=args.decoder_ffn_embed_dim ,
        d_model=args.encoder_embed_dim ,
        encoder_layerdrop=args.encoder_layerdrop ,
        decoder_layerdrop=args.decoder_layerdrop ,
        dropout=args.dropout ,
        attention_dropout=args.attention_dropout ,
        activation_dropout=args.activation_dropout ,
        activation_function="""relu""" ,
    )
    _SCREAMING_SNAKE_CASE : List[str] = state_dict['''decoder.embed_tokens.weight''']
    _SCREAMING_SNAKE_CASE : Dict = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
    model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
    _SCREAMING_SNAKE_CASE : int = make_linear_from_emb(model.model.shared )
    return model


if __name__ == "__main__":
    UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
    UpperCAmelCase_ : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
200
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
    UpperCAmelCase : List[str] = 0
    UpperCAmelCase : List[Any] = len(UpperCAmelCase ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        UpperCAmelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(UpperCAmelCase ):
            return None
        UpperCAmelCase : Optional[Any] = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                UpperCAmelCase : Any = left
                UpperCAmelCase : List[str] = point
            elif point > right:
                UpperCAmelCase : Any = right
                UpperCAmelCase : List[str] = point
            else:
                if item < current_item:
                    UpperCAmelCase : Optional[int] = point - 1
                else:
                    UpperCAmelCase : str = point + 1
    return None


def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] ) -> Dict:
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    UpperCAmelCase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(UpperCAmelCase ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
    elif point > right:
        return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , point - 1 )
        else:
            return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , point + 1 , UpperCAmelCase )


def a__ ( UpperCAmelCase : Union[str, Any] ) -> int:
    if collection != sorted(UpperCAmelCase ):
        raise ValueError('''Collection must be ascending sorted''' )
    return True


if __name__ == "__main__":
    import sys

    _lowerCamelCase : Optional[int] = 0
    if debug == 1:
        _lowerCamelCase : Dict = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    _lowerCamelCase : List[Any] = 6_7
    _lowerCamelCase : Optional[Any] = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print("Not found")
336
0
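Usage sketch for the iterative search above (assuming it is importable under its original name `interpolation_search`); the input must be ascending-sorted, as the guard at the bottom of that file enforces.

collection = [10, 30, 40, 45, 50, 66, 77, 93]
assert interpolation_search(collection, 66) == 5   # found at index 5
assert interpolation_search(collection, 7) is None  # below the value range, a miss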
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


UpperCAmelCase__ = logging.get_logger(__name__)


class a ( lowerCAmelCase_ ):
    _snake_case : List[str] = 'upernet'

    def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
        super().__init__(**__lowerCAmelCase )
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
            _UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
        elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            _UpperCAmelCase = backbone_config.get("""model_type""" )
            _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
            _UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
        _UpperCAmelCase = backbone_config
        _UpperCAmelCase = hidden_size
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = pool_scales
        _UpperCAmelCase = use_auxiliary_head
        _UpperCAmelCase = auxiliary_loss_weight
        _UpperCAmelCase = auxiliary_in_channels
        _UpperCAmelCase = auxiliary_channels
        _UpperCAmelCase = auxiliary_num_convs
        _UpperCAmelCase = auxiliary_concat_input
        _UpperCAmelCase = loss_ignore_index

    def lowerCAmelCase_ ( self : List[Any] ):
        _UpperCAmelCase = copy.deepcopy(self.__dict__ )
        _UpperCAmelCase = self.backbone_config.to_dict()
        _UpperCAmelCase = self.__class__.model_type
        return output
357
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'layoutlmv3' def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) _UpperCAmelCase = max_ad_position_embeddings _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = has_relative_attention_bias _UpperCAmelCase = rel_pos_bins _UpperCAmelCase = max_rel_pos _UpperCAmelCase = has_spatial_attention_bias _UpperCAmelCase = rel_ad_pos_bins _UpperCAmelCase = max_rel_ad_pos _UpperCAmelCase = text_embed _UpperCAmelCase = visual_embed _UpperCAmelCase = input_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = classifier_dropout class a ( lowerCAmelCase_ ): _snake_case : str = version.parse('1.12' ) @property def lowerCAmelCase_ ( self : Dict ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: 
"""batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1e-5 @property def lowerCAmelCase_ ( self : List[str] ): return 12 def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
30
0
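A minimal pairing sketch for the UperNet configuration above; the ConvNeXt backbone choice is illustrative, and the `out_features` argument assumes a transformers version where backbone configs expose it.

from transformers import ConvNextConfig, UperNetConfig

backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone_config)
assert config.backbone_config.model_type == "convnext"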
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class UpperCamelCase__( _lowercase ): lowerCAmelCase__ : List[Any] = '''transfo-xl''' lowerCAmelCase__ : Tuple = ['''mems'''] lowerCAmelCase__ : Tuple = { '''n_token''': '''vocab_size''', '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self ,__UpperCAmelCase=26_77_35 ,__UpperCAmelCase=[2_00_00, 4_00_00, 20_00_00] ,__UpperCAmelCase=10_24 ,__UpperCAmelCase=10_24 ,__UpperCAmelCase=16 ,__UpperCAmelCase=64 ,__UpperCAmelCase=40_96 ,__UpperCAmelCase=4 ,__UpperCAmelCase=False ,__UpperCAmelCase=18 ,__UpperCAmelCase=16_00 ,__UpperCAmelCase=10_00 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=0 ,__UpperCAmelCase=-1 ,__UpperCAmelCase=True ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=True ,__UpperCAmelCase="normal" ,__UpperCAmelCase=0.0_1 ,__UpperCAmelCase=0.0_1 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1e-5 ,__UpperCAmelCase=0 ,**__UpperCAmelCase ,) -> int: A__ = vocab_size A__ = [] self.cutoffs.extend(A_ ) if proj_share_all_but_first: A__ = [False] + [True] * len(self.cutoffs ) else: A__ = [False] + [False] * len(self.cutoffs ) A__ = d_model A__ = d_embed A__ = d_head A__ = d_inner A__ = div_val A__ = pre_lnorm A__ = n_layer A__ = n_head A__ = mem_len A__ = same_length A__ = attn_type A__ = clamp_len A__ = sample_softmax A__ = adaptive A__ = dropout A__ = dropatt A__ = untie_r A__ = init A__ = init_range A__ = proj_init_std A__ = init_std A__ = layer_norm_epsilon super().__init__(eos_token_id=A_ ,**A_ ) @property def snake_case__ ( self ) -> Optional[int]: # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case__ ( self ,__UpperCAmelCase ) -> str: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
221
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _snake_case ( snake_case__ : Dict ): A = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : int ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ): A = torch.load(snake_case__ , map_location='cpu' )['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['encoder.embed_tokens.weight'].shape[0] A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: A = 'relu' A = state_dict['decoder.embed_tokens.weight'] A = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
74
0
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # To reduce the number of attribute look-ups in the `while` loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
99
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input is already a (possibly batched) {"image": ..., "question": ...} payload.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
99
1
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # First hidden layer has 4 nodes; second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Second hidden layer has 3 nodes; output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the predictions are to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    # (The obfuscated source lost this literal; False matches the upstream example.)
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
55
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def _snake_case ( ): UpperCAmelCase : List[str] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=UpperCamelCase ) UpperCAmelCase : Dict = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=UpperCamelCase ) env_command_parser(subparsers=UpperCamelCase ) launch_command_parser(subparsers=UpperCamelCase ) tpu_command_parser(subparsers=UpperCamelCase ) test_command_parser(subparsers=UpperCamelCase ) # Let's go UpperCAmelCase : Optional[int] = parser.parse_args() if not hasattr(UpperCamelCase , """func""" ): parser.print_help() exit(1 ) # Run args.func(UpperCamelCase ) if __name__ == "__main__": main()
109
0
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve: return all primes less than or equal to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
353
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            # The main tokenizer and the Q-Former tokenizer receive identical arguments,
            # so they are collected once here instead of being spelled out twice.
            tokenizer_kwargs = dict(
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            text_encoding = self.tokenizer(text=text, **tokenizer_kwargs)
            encoding.update(text_encoding)

            qformer_text_encoding = self.qformer_tokenizer(text=text, **tokenizer_kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
199
0
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.'
        )
29
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCamelCase = 25_0004 lowerCamelCase = 25_0020 @require_sentencepiece @require_tokenizers class _UpperCamelCase ( A , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = MBartTokenizer lowerCAmelCase__ = MBartTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __lowercase =MBartTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase =MBartTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase) __lowercase =tokenizer.tokenize('This is a test') self.assertListEqual(_lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) __lowercase =tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) __lowercase =tokenizer.convert_tokens_to_ids(_lowerCAmelCase) self.assertListEqual( _lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __lowercase =tokenizer.convert_ids_to_tokens(_lowerCAmelCase) self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def __lowerCamelCase ( self : List[str]): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __lowercase =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""): __lowercase =self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase) __lowercase =self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase) __lowercase =tempfile.mkdtemp() __lowercase =tokenizer_r.save_pretrained(_lowerCAmelCase) __lowercase =tokenizer_p.save_pretrained(_lowerCAmelCase) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files)) __lowercase =tuple(f for f in 
tokenizer_r_files if 'tokenizer.json' not in f) self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase) # Checks everything loads correctly in the same way __lowercase =tokenizer_r.from_pretrained(_lowerCAmelCase) __lowercase =tokenizer_p.from_pretrained(_lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowerCAmelCase) # Save tokenizer rust, legacy_format=True __lowercase =tempfile.mkdtemp() __lowercase =tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase) __lowercase =tokenizer_p.save_pretrained(_lowerCAmelCase) # Checks it save with the same files self.assertSequenceEqual(_lowerCAmelCase , _lowerCAmelCase) # Checks everything loads correctly in the same way __lowercase =tokenizer_r.from_pretrained(_lowerCAmelCase) __lowercase =tokenizer_p.from_pretrained(_lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase)) shutil.rmtree(_lowerCAmelCase) # Save tokenizer rust, legacy_format=False __lowercase =tempfile.mkdtemp() __lowercase =tokenizer_r.save_pretrained(_lowerCAmelCase , legacy_format=_lowerCAmelCase) __lowercase =tokenizer_p.save_pretrained(_lowerCAmelCase) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way __lowercase =tokenizer_r.from_pretrained(_lowerCAmelCase) __lowercase =tokenizer_p.from_pretrained(_lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase)) shutil.rmtree(_lowerCAmelCase) @require_torch @require_sentencepiece @require_tokenizers class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = """facebook/mbart-large-en-ro""" lowerCAmelCase__ = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] lowerCAmelCase__ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] lowerCAmelCase__ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE] @classmethod def __lowerCamelCase ( cls : Any): '''simple docstring''' __lowercase =MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO') __lowercase =1 return cls def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 2_5_0_0_0_1) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 2_5_0_0_0_4) 
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 2_5_0_0_2_0) def __lowerCamelCase ( self : List[str]): '''simple docstring''' __lowercase =self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase) def __lowerCamelCase ( self : Tuple): '''simple docstring''' self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids) __lowercase =[RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2] __lowercase =self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase) __lowercase =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase) self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase) def __lowerCamelCase ( self : Any): '''simple docstring''' __lowercase =['this is gunna be a long sentence ' * 2_0] assert isinstance(src_text[0] , _lowerCAmelCase) __lowercase =1_0 __lowercase =self.tokenizer(_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , _lowerCAmelCase) self.assertEqual(len(_lowerCAmelCase) , _lowerCAmelCase) def __lowerCamelCase ( self : Dict): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']) , [2_5_0_0_2_6, 2_5_0_0_0_1]) def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase =tempfile.mkdtemp() __lowercase =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowerCAmelCase) __lowercase =MBartTokenizer.from_pretrained(_lowerCAmelCase) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCAmelCase) @require_torch def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , return_tensors='pt') __lowercase =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def __lowerCamelCase ( self : List[Any]): '''simple docstring''' __lowercase =self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=len(self.expected_src_tokens) , return_tensors='pt' , ) __lowercase =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) self.assertEqual((2, 1_4) , batch.input_ids.shape) self.assertEqual((2, 1_4) , batch.attention_mask.shape) __lowercase =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE]) def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase =self.tokenizer(self.src_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=3 , return_tensors='pt') __lowercase =self.tokenizer( text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_0 , return_tensors='pt') __lowercase =targets['input_ids'] __lowercase 
=shift_tokens_right(_lowerCAmelCase , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0) @require_torch def __lowerCamelCase ( self : List[str]): '''simple docstring''' __lowercase =self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR') self.assertEqual( nested_simplify(_lowerCAmelCase) , { # A, test, EOS, en_XX 'input_ids': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 2_5_0_0_0_1, } , )
48
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
48
1
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    # Naive recursion: count the ordered sequences drawn from `array` that sum to `target`.
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    # Top-down memoisation over the same recursion.
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    # Bottom-up tabulation.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
43
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = 1_0 def _lowercase ( self : int ) -> List[str]: lowercase_ = [1, 2, 3, 4] lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[Any]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> List[Any]: lowercase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = '''''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) lowercase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = ['''It was the best of times.'''] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ = torch.tensor([1, 2, 3, 4] ) lowercase_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() ) def _lowercase ( self : List[Any] ) -> Tuple: lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() ) def _lowercase ( self : int ) -> Dict: lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() ) def _lowercase ( self : List[str] ) -> Tuple: lowercase_ = 1_0_1 lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
30
0
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
367
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
134
0
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowercase : List[Any] = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class A__ ( unittest.TestCase ): """simple docstring""" __A : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __A : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __A : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __A : Tuple = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def __lowercase ( self , lowercase , lowercase , lowercase) -> List[Any]: '''simple docstring''' a__ : Any = ZeroShotClassificationPipeline( model=lowercase , tokenizer=lowercase , candidate_labels=['polics', 'health']) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def __lowercase ( self , lowercase , lowercase) -> int: '''simple docstring''' a__ : Tuple = classifier('Who are you voting for in 2020?' , candidate_labels='politics') self.assertEqual(lowercase , {'sequence': ANY(lowercase), 'labels': [ANY(lowercase)], 'scores': [ANY(lowercase)]}) # No kwarg a__ : Union[str, Any] = classifier('Who are you voting for in 2020?' , ['politics']) self.assertEqual(lowercase , {'sequence': ANY(lowercase), 'labels': [ANY(lowercase)], 'scores': [ANY(lowercase)]}) a__ : Any = classifier('Who are you voting for in 2020?' , candidate_labels=['politics']) self.assertEqual(lowercase , {'sequence': ANY(lowercase), 'labels': [ANY(lowercase)], 'scores': [ANY(lowercase)]}) a__ : Union[str, Any] = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health') self.assertEqual( lowercase , {'sequence': ANY(lowercase), 'labels': [ANY(lowercase), ANY(lowercase)], 'scores': [ANY(lowercase), ANY(lowercase)]}) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])) , 1.0) a__ : List[Any] = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health']) self.assertEqual( lowercase , {'sequence': ANY(lowercase), 'labels': [ANY(lowercase), ANY(lowercase)], 'scores': [ANY(lowercase), ANY(lowercase)]}) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])) , 1.0) a__ : Optional[int] = classifier( 'Who are you voting for in 2020?' 
, candidate_labels='politics' , hypothesis_template='This text is about {}') self.assertEqual(lowercase , {'sequence': ANY(lowercase), 'labels': [ANY(lowercase)], 'scores': [ANY(lowercase)]}) # https://github.com/huggingface/transformers/issues/13846 a__ : Optional[Any] = classifier(['I am happy'] , ['positive', 'negative']) self.assertEqual( lowercase , [ {'sequence': ANY(lowercase), 'labels': [ANY(lowercase), ANY(lowercase)], 'scores': [ANY(lowercase), ANY(lowercase)]} for i in range(1) ] , ) a__ : Union[str, Any] = classifier(['I am happy', 'I am sad'] , ['positive', 'negative']) self.assertEqual( lowercase , [ {'sequence': ANY(lowercase), 'labels': [ANY(lowercase), ANY(lowercase)], 'scores': [ANY(lowercase), ANY(lowercase)]} for i in range(2) ] , ) with self.assertRaises(lowercase): classifier('' , candidate_labels='politics') with self.assertRaises(lowercase): classifier(lowercase , candidate_labels='politics') with self.assertRaises(lowercase): classifier('Who are you voting for in 2020?' , candidate_labels='') with self.assertRaises(lowercase): classifier('Who are you voting for in 2020?' , candidate_labels=lowercase) with self.assertRaises(lowercase): classifier( 'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , ) with self.assertRaises(lowercase): classifier( 'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=lowercase , ) self.run_entailment_id(lowercase) def __lowercase ( self , lowercase) -> List[str]: '''simple docstring''' a__ : Tuple = zero_shot_classifier.model.config a__ : Optional[int] = config.labelaid a__ : List[str] = zero_shot_classifier.entailment_id a__ : Any = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2} self.assertEqual(zero_shot_classifier.entailment_id , -1) a__ : int = {'entailment': 0, 'neutral': 1, 'contradiction': 2} self.assertEqual(zero_shot_classifier.entailment_id , 0) a__ : int = {'ENTAIL': 0, 'NON-ENTAIL': 1} self.assertEqual(zero_shot_classifier.entailment_id , 0) a__ : Optional[int] = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0} self.assertEqual(zero_shot_classifier.entailment_id , 2) a__ : Tuple = original_labelaid self.assertEqual(lowercase , zero_shot_classifier.entailment_id) @require_torch def __lowercase ( self) -> Any: '''simple docstring''' a__ : Union[str, Any] = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( 'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science']) @require_torch def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : Optional[Any] = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , ) a__ : Union[str, Any] = zero_shot_classifier( 'Who are you voting for in 2020?' 
, candidate_labels=['politics', 'public health', 'science']) self.assertEqual( nested_simplify(lowercase) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.3_33, 0.3_33, 0.3_33], } , ) @require_tf def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : List[Any] = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , ) a__ : Optional[int] = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science']) self.assertEqual( nested_simplify(lowercase) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.3_33, 0.3_33, 0.3_33], } , ) @slow @require_torch def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Dict = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt') a__ : Union[str, Any] = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science']) self.assertEqual( nested_simplify(lowercase) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.9_76, 0.0_15, 0.0_09], } , ) a__ : int = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=lowercase , ) self.assertEqual( nested_simplify(lowercase) , { 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , ) @slow @require_tf def __lowercase ( self) -> int: '''simple docstring''' a__ : Dict = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf') a__ : str = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science']) self.assertEqual( nested_simplify(lowercase) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.9_76, 0.0_15, 0.0_09], } , ) a__ : Dict = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=lowercase , ) self.assertEqual( nested_simplify(lowercase) , { 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. 
We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , )
99
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class A__ : """simple docstring""" def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=[1, 16, 4, 4] , lowercase=None , ) -> List[Any]: '''simple docstring''' a__ : Optional[int] = parent a__ : Optional[int] = batch_size a__ : Any = image_size a__ : Optional[Any] = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : List[str] = hidden_size a__ : Tuple = num_hidden_layers a__ : Optional[Any] = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : Optional[int] = hidden_act a__ : Optional[Any] = hidden_dropout_prob a__ : Any = attention_probs_dropout_prob a__ : Any = type_sequence_label_size a__ : Tuple = initializer_range a__ : Tuple = scope a__ : int = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size a__ : Any = (self.image_size // 32) ** 2 a__ : List[Any] = num_patches + 1 def __lowercase ( self) -> Any: '''simple docstring''' a__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a__ : int = None if self.use_labels: a__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) a__ : List[str] = self.get_config() return config, pixel_values, labels def __lowercase ( self) -> Dict: '''simple docstring''' a__ : List[str] = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [4, 8, 16, 32], 'num_groups': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowercase , ) def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]: '''simple docstring''' a__ : List[str] = ViTHybridModel(config=lowercase) model.to(lowercase) model.eval() a__ : Union[str, Any] = 
model(lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __lowercase ( self , lowercase , lowercase , lowercase) -> Union[str, Any]: '''simple docstring''' a__ : Dict = self.type_sequence_label_size a__ : Union[str, Any] = ViTHybridForImageClassification(lowercase) model.to(lowercase) model.eval() a__ : Tuple = model(lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def __lowercase ( self) -> Any: '''simple docstring''' a__ : List[str] = self.prepare_config_and_inputs() a__ , a__ , a__ : Union[str, Any] = config_and_inputs a__ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () __A : List[str] = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) __A : Any = False __A : Optional[int] = False __A : Optional[Any] = False def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : Any = ViTHybridModelTester(self) a__ : Any = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37) def __lowercase ( self) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds') def __lowercase ( self) -> Dict: '''simple docstring''' pass def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : str = model_class(lowercase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) a__ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase , nn.Linear)) def __lowercase ( self) -> int: '''simple docstring''' a__ , a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Union[str, Any] = model_class(lowercase) a__ : Union[str, Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[Any] = [*signature.parameters.keys()] a__ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase) def __lowercase ( self) -> Any: '''simple docstring''' a__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase) def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase) def __lowercase ( self) -> Dict: '''simple docstring''' a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Tuple = _config_zero_init(lowercase) for model_class in self.all_model_classes: a__ : List[Any] = model_class(config=lowercase) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": a__ : Dict = [F'{name}.{key}' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter 
{name} of model {model_class} seems not properly initialized' , ) @slow def __lowercase ( self) -> Any: '''simple docstring''' for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Optional[Any] = ViTHybridModel.from_pretrained(lowercase) self.assertIsNotNone(lowercase) def A_ ( ) -> int: a__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase ( self) -> Optional[Any]: '''simple docstring''' return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def __lowercase ( self) -> Any: '''simple docstring''' a__ : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( lowercase) a__ : List[str] = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Any = image_processor(images=lowercase , return_tensors='pt').to(lowercase) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowercase) # verify the logits a__ : Optional[Any] = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , lowercase) a__ : Any = torch.tensor([-1.90_90, -0.49_93, -0.23_89]).to(lowercase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4)) @slow @require_accelerate def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : List[str] = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384') a__ : Union[str, Any] = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto') a__ : Any = prepare_img() a__ : str = image_processor(images=lowercase , return_tensors='pt') a__ : List[Any] = model(**lowercase) a__ : int = outputs.logits # model predicts one of the 1000 ImageNet classes a__ : List[str] = logits.argmax(-1).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat')
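# A minimal, hedged inference sketch mirroring the integration test above; the
# checkpoint id "google/vit-hybrid-base-bit-384" and the fixture image path are
# taken from the test code itself.
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits  # shape (1, 1000): ImageNet class logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"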
99
1
"""simple docstring""" import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( "compression_format, is_archive" , [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ] , ) def __lowerCAmelCase ( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : Dict , lowercase : str , lowercase : Any , lowercase : Optional[Any] , lowercase : int , lowercase : str , lowercase : Union[str, Any] , ) -> List[str]: """simple docstring""" snake_case : List[Any] = { "7z": (seven_zip_file, SevenZipExtractor), "bz2": (bza_file, BzipaExtractor), "gzip": (gz_file, GzipExtractor), "lz4": (lza_file, LzaExtractor), "tar": (tar_file, TarExtractor), "xz": (xz_file, XzExtractor), "zip": (zip_file, ZipExtractor), "zstd": (zstd_file, ZstdExtractor), } snake_case : List[str] = input_paths_and_base_extractors[compression_format] if input_path is None: snake_case : Union[str, Any] = F'for \'{compression_format}\' compression_format, ' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(lowercase ) assert base_extractor.is_extractable(lowercase ) snake_case : Tuple = tmp_path / ("extracted" if is_archive else "extracted.txt") base_extractor.extract(lowercase , lowercase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name snake_case : Dict = file_path.read_text(encoding="utf-8" ) else: snake_case : Optional[Any] = output_path.read_text(encoding="utf-8" ) snake_case : Optional[Any] = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( "compression_format, is_archive" , [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ] , ) def __lowerCAmelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict , lowercase : str , lowercase : int , lowercase : List[str] , lowercase : List[Any] , lowercase : List[str] , lowercase : List[Any] , lowercase : int , lowercase : Optional[Any] , lowercase : List[str] , ) -> List[Any]: """simple docstring""" snake_case : List[Any] = { "7z": seven_zip_file, "bz2": bza_file, "gzip": gz_file, "lz4": lza_file, "tar": tar_file, "xz": xz_file, "zip": zip_file, "zstd": zstd_file, } snake_case : str = input_paths[compression_format] if input_path is None: snake_case : Optional[Any] = F'for \'{compression_format}\' compression_format, ' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(lowercase ) snake_case : Optional[int] = Extractor.infer_extractor_format(lowercase ) assert extractor_format is not None snake_case : List[str] = tmp_path / ("extracted" if is_archive else "extracted.txt") Extractor.extract(lowercase , lowercase , lowercase ) if is_archive: assert output_path.is_dir() for file_path in 
output_path.iterdir(): assert file_path.name == text_file.name snake_case : Union[str, Any] = file_path.read_text(encoding="utf-8" ) else: snake_case : Tuple = output_path.read_text(encoding="utf-8" ) snake_case : Any = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.fixture def __lowerCAmelCase ( lowercase : Tuple , lowercase : Optional[Any] ) -> int: """simple docstring""" import tarfile snake_case : Optional[int] = tmp_path / "data_dot_dot" directory.mkdir() snake_case : List[str] = directory / "tar_file_with_dot_dot.tar" with tarfile.TarFile(lowercase , "w" ) as f: f.add(lowercase , arcname=os.path.join(".." , text_file.name ) ) return path @pytest.fixture def __lowerCAmelCase ( lowercase : Any ) -> List[str]: """simple docstring""" import tarfile snake_case : Tuple = tmp_path / "data_sym_link" directory.mkdir() snake_case : Dict = directory / "tar_file_with_sym_link.tar" os.symlink(".." , directory / "subdir" , target_is_directory=lowercase ) with tarfile.TarFile(lowercase , "w" ) as f: f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( "insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , ) def __lowerCAmelCase ( lowercase : List[Any] , lowercase : Tuple , lowercase : int , lowercase : Optional[int] , lowercase : int , lowercase : int ) -> List[str]: """simple docstring""" snake_case : Optional[Any] = { "tar_file_with_dot_dot": tar_file_with_dot_dot, "tar_file_with_sym_link": tar_file_with_sym_link, } snake_case : Any = insecure_tar_files[insecure_tar_file] snake_case : Optional[Any] = tmp_path / "extracted" TarExtractor.extract(lowercase , lowercase ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def __lowerCAmelCase ( lowercase : Tuple ) -> int: """simple docstring""" snake_case : Tuple = tmpdir / "not_a_zip_file" # From: https://github.com/python/cpython/pull/5053 snake_case : List[str] = ( b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00" b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I" b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07" b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82" ) with not_a_zip_file.open("wb" ) as f: f.write(lowercase ) assert zipfile.is_zipfile(str(lowercase ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(lowercase ) # but we're right
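# A minimal usage sketch of the Extractor API exercised by the tests above;
# the archive path is a placeholder.
from datasets.utils.extract import Extractor

archive_path = "data/archive.tar"
extractor_format = Extractor.infer_extractor_format(archive_path)  # e.g. "tar"
if extractor_format is not None:
    Extractor.extract(archive_path, "data/extracted", extractor_format)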
357
"""simple docstring""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __snake_case = { """/attention/""": """/0/SelfAttention/""", """/self_attention/""": """/0/SelfAttention/""", """/encoder_decoder_attention/""": """/1/EncDecAttention/""", """value""": """v""", """query""": """q""", """key""": """k""", """out""": """o""", """pre_self_attention_layer_norm""": """0/layer_norm""", """pre_cross_attention_layer_norm""": """1/layer_norm""", """pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong """token_embedder""": """shared""", """encoder_norm""": """final_layer_norm""", """decoder_norm""": """final_layer_norm""", """relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""", """router/router_weights/w/""": """router/classifier/""", """roer/roer_weights/w/""": """router/classifier/""", """logits_dense""": """lm_head""", } def __lowerCAmelCase ( lowercase : Optional[int] ) -> List[str]: """simple docstring""" snake_case : Optional[Any] = list(s_dict.keys() ) for key in keys: snake_case : Any = R".*/layers_(\d+)" snake_case : Tuple = key if re.match(lowercase , lowercase ): snake_case : List[str] = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowercase ) snake_case : Union[str, Any] = R"(encoder|decoder)\/" if re.match(lowercase , lowercase ): snake_case : Any = re.match(lowercase , lowercase ).groups() if groups[0] == "encoder": snake_case : Union[str, Any] = re.sub(R"/mlp/" , R"/1/mlp/" , lowercase ) snake_case : int = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowercase ) elif groups[0] == "decoder": snake_case : str = re.sub(R"/mlp/" , R"/2/mlp/" , lowercase ) snake_case : List[str] = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowercase ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: snake_case : int = new_key.replace(lowercase , lowercase ) print(F'{key} -> {new_key}' ) snake_case : Optional[Any] = s_dict.pop(lowercase ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case : int = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case : Optional[Any] = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: snake_case : Tuple = s_dict[key].shape[0] expert_weihts = s_dict[key] for idx in range(lowercase ): snake_case : List[str] = expert_weihts[idx] print(F'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' ) s_dict.pop(lowercase ) return s_dict __snake_case = { """NUM_ENCODER_LAYERS""": """num_layers""", """NUM_DECODER_LAYERS""": """num_decoder_layers""", """NUM_HEADS""": """num_heads""", """HEAD_DIM""": """d_kv""", """EMBED_DIM""": """d_model""", """MLP_DIM""": """d_ff""", """NUM_SELECTED_EXPERTS""": """num_selected_experts""", """NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""", """NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""", """dense.MlpBlock.activations""": """feed_forward_proj""", } def __lowerCAmelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> int: """simple docstring""" import regex as re with open(lowercase , "r" ) as f: snake_case : List[str] = f.read() snake_case : Tuple = re.findall(R"(.*) = ([0-9.]*)" , lowercase ) snake_case : Any = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": snake_case : Tuple = float(lowercase ) if "." in value else int(lowercase ) snake_case : List[str] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowercase )[0] snake_case : List[Any] = str(activation[1] ) snake_case : Optional[Any] = num_experts snake_case : List[Any] = SwitchTransformersConfig(**lowercase ) return config def __lowerCAmelCase ( lowercase : Tuple , lowercase : Tuple , lowercase : Union[str, Any]=None , lowercase : Any="./" , lowercase : int=8 ) -> Dict: """simple docstring""" print(F'Loading flax weights from : {flax_checkpoint_path}' ) snake_case : Union[str, Any] = checkpoints.load_tax_checkpoint(lowercase ) if gin_file is not None: snake_case : List[str] = convert_gin_to_config(lowercase , lowercase ) else: snake_case : str = SwitchTransformersConfig.from_pretrained(lowercase ) snake_case : Any = SwitchTransformersForConditionalGeneration(lowercase ) snake_case : Optional[Any] = flax_params["target"] snake_case : Optional[int] = flatten_dict(lowercase , sep="/" ) snake_case : Optional[Any] = rename_keys(lowercase ) snake_case : List[str] = unflatten_dict(lowercase , sep="/" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(lowercase , lowercase ) print(F'Save PyTorch model to {pytorch_dump_path}' ) pt_model.save_pretrained(lowercase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the""" """ model architecture. If not provided, a `gin_file` has to be provided.""" ), ) parser.add_argument( """--gin_file""", default=None, type=str, required=False, help="""Path to the gin config file. 
If not provided, a `config_file` has to be passed """, ) parser.add_argument( """--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model.""" ) parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""") __snake_case = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_t5x_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
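# A sketch of calling the converter directly instead of through the CLI,
# mirroring the __main__ block above; every path below is a placeholder.
convert_flax_checkpoint_to_pytorch(
    "/path/to/t5x/checkpoint",        # T5X/flax checkpoint directory
    None,                             # config_name (derive the config from the gin file instead)
    "/path/to/operative_config.gin",  # gin file used to build the config
    "./switch-transformers-pt",       # output folder for the PyTorch weights
    8,                                # number of experts
)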
112
0
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _UpperCAmelCase( UpperCamelCase_ ): def __init__( self , __a , __a , __a , __a = None , ) -> Optional[int]: '''simple docstring''' super().__init__() self.register_modules(transformer=lowercase_ , vae=lowercase_ , scheduler=lowercase_) # create a imagenet -> id dictionary for easier use _UpperCamelCase = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(''','''): _UpperCamelCase = int(lowercase_) _UpperCamelCase = dict(sorted(self.labels.items())) def UpperCAmelCase ( self , __a) -> List[int]: '''simple docstring''' if not isinstance(lowercase_ , lowercase_): _UpperCamelCase = list(lowercase_) for l in label: if l not in self.labels: raise ValueError( F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''') return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , __a , __a = 4.0 , __a = None , __a = 50 , __a = "pil" , __a = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' _UpperCamelCase = len(lowercase_) _UpperCamelCase = self.transformer.config.sample_size _UpperCamelCase = self.transformer.config.in_channels _UpperCamelCase = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase_ , device=self.device , dtype=self.transformer.dtype , ) _UpperCamelCase = torch.cat([latents] * 2) if guidance_scale > 1 else latents _UpperCamelCase = torch.tensor(lowercase_ , device=self.device).reshape(-1) _UpperCamelCase = torch.tensor([10_00] * batch_size , device=self.device) _UpperCamelCase = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowercase_) for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale > 1: _UpperCamelCase = latent_model_input[: len(lowercase_) // 2] _UpperCamelCase = torch.cat([half, half] , dim=0) _UpperCamelCase = self.scheduler.scale_model_input(lowercase_ , lowercase_) _UpperCamelCase = t if not torch.is_tensor(lowercase_): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) _UpperCamelCase = latent_model_input.device.type == 'mps' if isinstance(lowercase_ , lowercase_): _UpperCamelCase = torch.floataa if is_mps else torch.floataa else: _UpperCamelCase = torch.intaa if is_mps else torch.intaa _UpperCamelCase = torch.tensor([timesteps] , dtype=lowercase_ , device=latent_model_input.device) elif len(timesteps.shape) == 0: _UpperCamelCase = timesteps[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML _UpperCamelCase = timesteps.expand(latent_model_input.shape[0]) # predict noise model_output _UpperCamelCase = self.transformer( lowercase_ , timestep=lowercase_ , class_labels=lowercase_).sample # perform guidance if guidance_scale > 1: _UpperCamelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] _UpperCamelCase = torch.split(lowercase_ , len(lowercase_) // 2 , dim=0) _UpperCamelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps) _UpperCamelCase = torch.cat([half_eps, half_eps] , dim=0) _UpperCamelCase = torch.cat([eps, rest] , dim=1) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: _UpperCamelCase = torch.split(lowercase_ , lowercase_ , dim=1) else: _UpperCamelCase = noise_pred # compute previous image: x_t -> x_t-1 _UpperCamelCase = self.scheduler.step(lowercase_ , lowercase_ , lowercase_).prev_sample if guidance_scale > 1: _UpperCamelCase = latent_model_input.chunk(2 , dim=0) else: _UpperCamelCase = latent_model_input _UpperCamelCase = 1 / self.vae.config.scaling_factor * latents _UpperCamelCase = self.vae.decode(lowercase_).sample _UpperCamelCase = (samples / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _UpperCamelCase = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": _UpperCamelCase = self.numpy_to_pil(lowercase_) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowercase_)
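# A minimal usage sketch for the class-conditional pipeline above, assuming the
# public DiT checkpoint "facebook/DiT-XL-2-256":
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # ImageNet names -> label ids
images = pipe(class_labels=class_ids, num_inference_steps=25).images
images[0].save("white_shark.png")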
194
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class A ( ctypes.Structure ): # _fields is a specific attr expected by ctypes UpperCamelCase__ : List[Any] =[('size', ctypes.c_int), ('visible', ctypes.c_byte)] def a_ ( ): '''simple docstring''' if os.name == "nt": _lowerCamelCase : Optional[Any] =CursorInfo() _lowerCamelCase : Dict =ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) ) _lowerCamelCase : Any =False ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) ) elif os.name == "posix": sys.stdout.write('\033[?25l' ) sys.stdout.flush() def a_ ( ): '''simple docstring''' if os.name == "nt": _lowerCamelCase : Any =CursorInfo() _lowerCamelCase : Optional[Any] =ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) ) _lowerCamelCase : Union[str, Any] =True ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) ) elif os.name == "posix": sys.stdout.write('\033[?25h' ) sys.stdout.flush() @contextmanager def a_ ( ): '''simple docstring''' try: hide_cursor() yield finally: show_cursor()
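# A usage sketch; in accelerate these helpers are exposed as hide_cursor(),
# show_cursor() and a context manager named hide() -- the names here are
# obfuscated, so treat `hide` below as an assumption.
import time

with hide():
    time.sleep(2)  # the terminal cursor stays hidden while work runs
# the cursor is restored on exit, even if the body raised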
199
0
"""simple docstring""" import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __UpperCamelCase : Tuple = trt.Logger(trt.Logger.WARNING) __UpperCamelCase : Dict = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __UpperCamelCase : Any = logging.getLogger(__name__) __UpperCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--onnx_model_path''', default=None, type=str, required=True, help='''Path to ONNX model: ''', ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model checkpoints and predictions will be written.''', ) # Other parameters parser.add_argument( '''--tokenizer_name''', default='''''', type=str, required=True, help='''Pretrained tokenizer name or path if not the same as model_name''', ) parser.add_argument( '''--version_2_with_negative''', action='''store_true''', help='''If true, the SQuAD examples contain some that do not have an answer.''', ) parser.add_argument( '''--null_score_diff_threshold''', type=float, default=0.0, help='''If null_score - best_non_null is greater than the threshold predict null.''', ) parser.add_argument( '''--max_seq_length''', default=384, type=int, help=( '''The maximum total input sequence length after WordPiece tokenization. Sequences ''' '''longer than this will be truncated, and sequences shorter than this will be padded.''' ), ) parser.add_argument( '''--doc_stride''', default=128, type=int, help='''When splitting up a long document into chunks, how much stride to take between chunks.''', ) parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''') parser.add_argument( '''--n_best_size''', default=20, type=int, help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''', ) parser.add_argument( '''--max_answer_length''', default=30, type=int, help=( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ), ) parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''') parser.add_argument( '''--dataset_name''', type=str, default=None, required=True, help='''The name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--dataset_config_name''', type=str, default=None, help='''The configuration name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.''' ) parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''') parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision instead of 32-bit''', ) parser.add_argument( '''--int8''', action='''store_true''', help='''Whether to use INT8''', ) __UpperCamelCase : Optional[int] = parser.parse_args() if args.tokenizer_name: __UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) logger.info('''Training/evaluation parameters %s''', args) __UpperCamelCase : Optional[Any] = args.per_device_eval_batch_size __UpperCamelCase : Union[str, Any] = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __UpperCamelCase : List[str] = True __UpperCamelCase : int = '''temp_engine/bert-fp32.engine''' if args.fpaa: __UpperCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine''' if args.inta: __UpperCamelCase : int = '''temp_engine/bert-int8.engine''' # import ONNX file if not os.path.exists('''temp_engine'''): os.makedirs('''temp_engine''') __UpperCamelCase : Tuple = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, '''rb''') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __UpperCamelCase : Optional[int] = [network.get_input(i) for i in range(network.num_inputs)] __UpperCamelCase : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __UpperCamelCase : Dict = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __UpperCamelCase : Tuple = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __UpperCamelCase : Optional[Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, '''wb''') as f: f.write(engine.serialize()) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : 
Union[str, Any] ): lowerCAmelCase = np.asarray(inputs['input_ids'] , dtype=np.intaa ) lowerCAmelCase = np.asarray(inputs['attention_mask'] , dtype=np.intaa ) lowerCAmelCase = np.asarray(inputs['token_type_ids'] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _UpperCAmelCase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _UpperCAmelCase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _UpperCAmelCase ) # start time lowerCAmelCase = time.time() # Run inference context.execute_async( bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) cuda.memcpy_dtoh_async(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Synchronize the stream and take time stream.synchronize() # end time lowerCAmelCase = time.time() lowerCAmelCase = end_time - start_time lowerCAmelCase = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __UpperCamelCase : Tuple = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __UpperCamelCase : Optional[Any] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('''Evaluation requires a dataset name''') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __UpperCamelCase : List[str] = raw_datasets['''validation'''].column_names __UpperCamelCase : Any = '''question''' if '''question''' in column_names else column_names[0] __UpperCamelCase : Optional[int] = '''context''' if '''context''' in column_names else column_names[1] __UpperCamelCase : Optional[int] = '''answers''' if '''answers''' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
__UpperCamelCase : List[Any] = tokenizer.padding_side == '''right''' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __UpperCamelCase : Optional[int] = min(args.max_seq_length, tokenizer.model_max_length) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace lowerCAmelCase = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. lowerCAmelCase = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=_UpperCAmelCase , stride=args.doc_stride , return_overflowing_tokens=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , padding='max_length' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. lowerCAmelCase = tokenized_examples.pop('overflow_to_sample_mapping' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. lowerCAmelCase = [] for i in range(len(tokenized_examples['input_ids'] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). lowerCAmelCase = tokenized_examples.sequence_ids(_UpperCAmelCase ) lowerCAmelCase = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. lowerCAmelCase = sample_mapping[i] tokenized_examples["example_id"].append(examples['id'][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. lowerCAmelCase = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['offset_mapping'][i] ) ] return tokenized_examples __UpperCamelCase : Any = raw_datasets['''validation'''] # Validation Feature Creation __UpperCamelCase : Union[str, Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) __UpperCamelCase : Any = default_data_collator __UpperCamelCase : Tuple = eval_dataset.remove_columns(['''example_id''', '''offset_mapping''']) __UpperCamelCase : Optional[Any] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Dict="eval" ): # Post-processing: we match the start logits and end logits to answers in the original context. 
lowerCAmelCase = postprocess_qa_predictions( examples=_UpperCAmelCase , features=_UpperCAmelCase , predictions=_UpperCAmelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_UpperCAmelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: lowerCAmelCase = [ {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items() ] else: lowerCAmelCase = [{'id': k, 'prediction_text': v} for k, v in predictions.items()] lowerCAmelCase = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=_UpperCAmelCase , label_ids=_UpperCAmelCase ) __UpperCamelCase : int = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''') # Evaluation! logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path) with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] ): return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize # Allocate device memory for inputs and outputs. __UpperCamelCase : Union[str, Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __UpperCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __UpperCamelCase : Optional[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __UpperCamelCase : List[str] = cuda.mem_alloc(h_outputa.nbytes) __UpperCamelCase : int = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
__UpperCamelCase : Union[str, Any] = cuda.Stream() # Evaluation logger.info('''***** Running Evaluation *****''') logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') __UpperCamelCase : Tuple = 0.0 __UpperCamelCase : Optional[Any] = 0 __UpperCamelCase : Tuple = timeit.default_timer() __UpperCamelCase : Tuple = None for step, batch in enumerate(eval_dataloader): __UpperCamelCase ,__UpperCamelCase : List[str] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __UpperCamelCase ,__UpperCamelCase : str = outputs __UpperCamelCase : List[Any] = torch.tensor(start_logits) __UpperCamelCase : Optional[int] = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __UpperCamelCase : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __UpperCamelCase : Any = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __UpperCamelCase : Optional[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __UpperCamelCase : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __UpperCamelCase : List[Any] = nested_truncate(all_preds, len(eval_dataset)) __UpperCamelCase : List[str] = timeit.default_timer() - start_time logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter)) logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000)) logger.info('''Total Number of Inference = %d''', niter) __UpperCamelCase : List[str] = post_processing_function(eval_examples, eval_dataset, all_preds) __UpperCamelCase : Union[str, Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
309
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'train': SplitInfo()} ), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ): lowerCAmelCase = split_dict._to_yaml_list() assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowerCAmelCase = None # the split name of split_dict takes over the name of the split info object lowerCAmelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ): # For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name" # field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files lowerCAmelCase = asdict(SplitDict({'train': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
309
1
from collections.abc import Sequence def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: return sum(c * (x**i) for i, c in enumerate(_SCREAMING_SNAKE_CASE ) ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: lowerCamelCase : str = 0.0 for coeff in reversed(_SCREAMING_SNAKE_CASE ): lowerCamelCase : Tuple = result * x + coeff return result if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : int = (0.0, 0.0, 5.0, 9.3, 7.0) SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
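# Worked check for the demo values above (using the evaluate_poly/horner names
# from the __main__ block): poly(x) = 5x^2 + 9.3x^3 + 7x^4, so at x = 10 both
# evaluators should return 500 + 9300 + 70000 = 79800, up to float rounding.
assert abs(horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) - 79_800.0) < 1e-6
assert abs(evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) - 79_800.0) < 1e-6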
48
from math import sqrt def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : int = 0 lowerCamelCase : int = 0 lowerCamelCase : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_SCREAMING_SNAKE_CASE ,sum_shortest_sides // 2 ) - max(1 ,sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
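# Sanity check (a sketch, assuming the intended de-obfuscated implementation
# and the `solution` name used in the __main__ block): Project Euler 86 states
# there are exactly 1975 integer-shortest-path cuboids up to M = 99 and 2060 up
# to M = 100, so the first M whose count exceeds 1975 is 100.
assert solution(1975) == 100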
48
1
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase : def __init__( self :Optional[Any] , _lowercase :int , _lowercase :Union[str, Any]=13 , _lowercase :Dict=7 , _lowercase :List[Any]=True , _lowercase :Union[str, Any]=True , _lowercase :Optional[Any]=True , _lowercase :Optional[Any]=True , _lowercase :Dict=99 , _lowercase :Optional[int]=32 , _lowercase :str=5 , _lowercase :str=4 , _lowercase :Dict=37 , _lowercase :str="gelu" , _lowercase :Optional[Any]=0.1 , _lowercase :List[str]=0.1 , _lowercase :List[str]=5_12 , _lowercase :List[str]=16 , _lowercase :Optional[Any]=2 , _lowercase :Union[str, Any]=0.02 , _lowercase :Any=3 , _lowercase :Optional[Any]=4 , _lowercase :List[str]=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope def UpperCAmelCase ( self :Dict ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self :List[str] ): '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , ) def UpperCAmelCase ( self :int , _lowercase :str , _lowercase :Optional[int] , 
_lowercase :str , _lowercase :int , _lowercase :Tuple , _lowercase :Union[str, Any] , _lowercase :int ): '''simple docstring''' lowercase__ = NystromformerModel(config=_lowercase ) model.to(_lowercase ) model.eval() lowercase__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) lowercase__ = model(_lowercase , token_type_ids=_lowercase ) lowercase__ = model(_lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self :List[str] , _lowercase :Optional[int] , _lowercase :Tuple , _lowercase :Tuple , _lowercase :int , _lowercase :str , _lowercase :Union[str, Any] , _lowercase :Tuple ): '''simple docstring''' lowercase__ = NystromformerForMaskedLM(config=_lowercase ) model.to(_lowercase ) model.eval() lowercase__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self :List[str] , _lowercase :List[str] , _lowercase :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int] , _lowercase :List[str] , _lowercase :int , _lowercase :List[Any] ): '''simple docstring''' lowercase__ = NystromformerForQuestionAnswering(config=_lowercase ) model.to(_lowercase ) model.eval() lowercase__ = model( _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self :int , _lowercase :Tuple , _lowercase :str , _lowercase :Optional[Any] , _lowercase :int , _lowercase :int , _lowercase :Any , _lowercase :Tuple ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = NystromformerForSequenceClassification(_lowercase ) model.to(_lowercase ) model.eval() lowercase__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase ( self :int , _lowercase :str , _lowercase :int , _lowercase :Tuple , _lowercase :Dict , _lowercase :int , _lowercase :Dict , _lowercase :str ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = NystromformerForTokenClassification(config=_lowercase ) model.to(_lowercase ) model.eval() lowercase__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[Any] , _lowercase :str , _lowercase :Optional[Any] , _lowercase :Tuple , _lowercase :Any , _lowercase :int , _lowercase :str ): '''simple docstring''' lowercase__ = self.num_choices lowercase__ = NystromformerForMultipleChoice(config=_lowercase ) model.to(_lowercase ) model.eval() lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ = model( _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def 
UpperCAmelCase ( self :Dict ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): __lowerCamelCase = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) __lowerCamelCase = ( { 'feature-extraction': NystromformerModel, 'fill-mask': NystromformerForMaskedLM, 'question-answering': NystromformerForQuestionAnswering, 'text-classification': NystromformerForSequenceClassification, 'token-classification': NystromformerForTokenClassification, 'zero-shot': NystromformerForSequenceClassification, } if is_torch_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False def UpperCAmelCase ( self :Union[str, Any] ): '''simple docstring''' lowercase__ = NystromformerModelTester(self ) lowercase__ = ConfigTester(self , config_class=_lowercase , hidden_size=37 ) def UpperCAmelCase ( self :Dict ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase ) def UpperCAmelCase ( self :Union[str, Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__ = type self.model_tester.create_and_check_model(*_lowercase ) def UpperCAmelCase ( self :List[str] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowercase ) def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowercase ) def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowercase ) def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowercase ) def UpperCAmelCase ( self :Any ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowercase ) @slow def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = NystromformerModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) @require_torch class lowerCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase ( self :Dict ): '''simple docstring''' lowercase__ = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" ) lowercase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): lowercase__ = model(_lowercase )[0] lowercase__ = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape , _lowercase ) lowercase__ = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], 
[-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _lowercase , atol=1e-4 ) ) @slow def UpperCAmelCase ( self :int ): '''simple docstring''' lowercase__ = "the [MASK] of Belgium is Brussels" lowercase__ = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" ) lowercase__ = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" ) lowercase__ = tokenizer(_lowercase , return_tensors="pt" ) with torch.no_grad(): lowercase__ = model(encoding.input_ids ).logits lowercase__ = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(_lowercase ) , "capital" )
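# A minimal fill-mask sketch matching the integration test above:
import torch
from transformers import AutoTokenizer, NystromformerForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
inputs = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
print(tokenizer.decode(logits[0, mask_pos].argmax(-1)))  # expected: "capital"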
201
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        '''simple docstring'''
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        '''simple docstring'''
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        '''simple docstring'''
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        '''simple docstring'''
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        '''simple docstring'''
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        '''simple docstring'''
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        '''simple docstring'''
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# Registry of known task categories (defined here for consumers of the module; not referenced below).
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
201
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a__ : Dict = logging.get_logger(__name__) a__ : Any = { 'microsoft/table-transformer-detection': ( 'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json' ), } class lowercase_ ( a__ ): __UpperCAmelCase = 'table-transformer' __UpperCAmelCase = ['past_key_values'] __UpperCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , a=True , a=None , a=3 , a=1_00 , a=6 , a=20_48 , a=8 , a=6 , a=20_48 , a=8 , a=0.0 , a=0.0 , a=True , a="relu" , a=2_56 , a=0.1 , a=0.0 , a=0.0 , a=0.02 , a=1.0 , a=False , a="sine" , a="resnet50" , a=True , a=False , a=1 , a=5 , a=2 , a=1 , a=1 , a=5 , a=2 , a=0.1 , **a , ): if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) UpperCamelCase__ = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(a , a ): UpperCamelCase__ = backbone_config.get("model_type" ) UpperCamelCase__ = CONFIG_MAPPING[backbone_model_type] UpperCamelCase__ = config_class.from_dict(a ) # set timm attributes to None UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None, None, None UpperCamelCase__ = use_timm_backbone UpperCamelCase__ = backbone_config UpperCamelCase__ = num_channels UpperCamelCase__ = num_queries UpperCamelCase__ = d_model UpperCamelCase__ = encoder_ffn_dim UpperCamelCase__ = encoder_layers UpperCamelCase__ = encoder_attention_heads UpperCamelCase__ = decoder_ffn_dim UpperCamelCase__ = decoder_layers UpperCamelCase__ = decoder_attention_heads UpperCamelCase__ = dropout UpperCamelCase__ = attention_dropout UpperCamelCase__ = activation_dropout UpperCamelCase__ = activation_function UpperCamelCase__ = init_std UpperCamelCase__ = init_xavier_std UpperCamelCase__ = encoder_layerdrop UpperCamelCase__ = decoder_layerdrop UpperCamelCase__ = encoder_layers UpperCamelCase__ = auxiliary_loss UpperCamelCase__ = position_embedding_type UpperCamelCase__ = backbone UpperCamelCase__ = use_pretrained_backbone UpperCamelCase__ = dilation # Hungarian matcher UpperCamelCase__ = class_cost UpperCamelCase__ = bbox_cost UpperCamelCase__ = giou_cost # Loss coefficients UpperCamelCase__ = mask_loss_coefficient UpperCamelCase__ = dice_loss_coefficient UpperCamelCase__ = bbox_loss_coefficient UpperCamelCase__ = giou_loss_coefficient UpperCamelCase__ = eos_coefficient super().__init__(is_encoder_decoder=a , **a ) @property def __a ( self ): return self.encoder_attention_heads @property def __a ( self ): return self.d_model class lowercase_ ( a__ ): __UpperCAmelCase = version.parse('1.11' ) @property def __a ( self ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def __a ( self ): return 1e-5 @property def __a ( self ): return 12
80
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case : Optional[Any] = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : List[Any] = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : str = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
134
0
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        """simple docstring"""
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        """simple docstring"""
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        """simple docstring"""
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
261
import copy import re class lowercase_ : A__ : Optional[Any] = """hp""" A__ : Union[str, Any] = {} A__ : Optional[int] = None @classmethod def lowerCamelCase_ ( cls , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = prefix UpperCamelCase_ = defaults cls.build_naming_info() @staticmethod def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" if len(__UpperCamelCase ) == 0: return "" UpperCamelCase_ = None if any(char.isdigit() for char in word ): raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__UpperCamelCase ) + 1 ): UpperCamelCase_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: UpperCamelCase_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__UpperCamelCase ): UpperCamelCase_ = """""" while integer != 0: UpperCamelCase_ = chr(ord("""A""" ) + integer % 1_0 ) + s integer //= 1_0 return s UpperCamelCase_ = 0 while True: UpperCamelCase_ = word + """#""" + int_to_alphabetic(__UpperCamelCase ) if sword in info["reverse_short_word"]: continue else: UpperCamelCase_ = sword break UpperCamelCase_ = short_word UpperCamelCase_ = word return short_word @staticmethod def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = param_name.split("""_""" ) UpperCamelCase_ = [TrialShortNamer.shortname_for_word(__UpperCamelCase , __UpperCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name UpperCamelCase_ = ["""""", """_"""] for separator in separators: UpperCamelCase_ = separator.join(__UpperCamelCase ) if shortname not in info["reverse_short_param"]: UpperCamelCase_ = shortname UpperCamelCase_ = param_name return shortname return param_name @staticmethod def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = TrialShortNamer.shortname_for_key(__UpperCamelCase , __UpperCamelCase ) UpperCamelCase_ = short_name UpperCamelCase_ = param_name @classmethod def lowerCamelCase_ ( cls ): """simple docstring""" if cls.NAMING_INFO is not None: return UpperCamelCase_ = { """short_word""": {}, """reverse_short_word""": {}, """short_param""": {}, """reverse_short_param""": {}, } UpperCamelCase_ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(__UpperCamelCase , __UpperCamelCase ) UpperCamelCase_ = info @classmethod def lowerCamelCase_ ( cls , __UpperCamelCase ): """simple docstring""" cls.build_naming_info() assert cls.PREFIX is not None UpperCamelCase_ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue UpperCamelCase_ = cls.NAMING_INFO["""short_param"""][k] if isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ = 1 if v else 0 UpperCamelCase_ = """""" if isinstance(__UpperCamelCase , (int, float) ) else """-""" UpperCamelCase_ = f'''{key}{sep}{v}''' name.append(__UpperCamelCase ) return "_".join(__UpperCamelCase ) @classmethod def lowerCamelCase_ ( cls , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = repr[len(cls.PREFIX ) + 1 :] if repr == "": UpperCamelCase_ = [] else: UpperCamelCase_ = repr.split("""_""" ) UpperCamelCase_ 
= {} for value in values: if "-" in value: UpperCamelCase_ , UpperCamelCase_ = value.split("""-""" ) else: UpperCamelCase_ = re.sub("""[0-9.]""" , """""" , __UpperCamelCase ) UpperCamelCase_ = float(re.sub("""[^0-9.]""" , """""" , __UpperCamelCase ) ) UpperCamelCase_ = cls.NAMING_INFO["""reverse_short_param"""][p_k] UpperCamelCase_ = p_v for k in cls.DEFAULTS: if k not in parameters: UpperCamelCase_ = cls.DEFAULTS[k] return parameters
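A round-trip sketch for the namer above. It assumes the obfuscated classmethods are restored to the names the class calls internally and exposes upstream in transformers (set_defaults/build_naming_info/add_new_param_name, plus shortname and parse_repr for the last two classmethods); the subclass and hyperparameter names are hypothetical:

class RunNamer(TrialShortNamer):  # hypothetical subclass
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}

name = RunNamer.shortname({"learning_rate": 5e-4, "batch_size": 32})
print(name)                      # e.g. "run_lr0.0005": batch_size equals its default, so it is omitted
print(RunNamer.parse_repr(name)) # {"learning_rate": 0.0005, "batch_size": 32}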
261
1
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
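A sketch of a typical call site for the helper above, inside the same package (the function name, the `old_arg` keyword, and the version string are hypothetical; the version must still be ahead of the installed release, or deprecate raises):

def my_function(new_arg=None, **kwargs):
    # Accept the old keyword for a while, warning callers to migrate to `new_arg`.
    old_value = deprecate(
        "old_arg", "1.0.0", "Pass `new_arg` instead of `old_arg`.",
        take_from=kwargs, standard_warn=True,
    )
    if old_value is not None:
        new_arg = old_value  # fall back to the deprecated value the caller supplied
    return new_arg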
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
112
0
'''simple docstring'''
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    """simple docstring"""
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
6
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

# A list- or tuple-shaped container of T
ListLike = Union[List[T], Tuple[T, ...]]
# A bare value, list of values, or string-keyed mapping of values
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
# Anything that can be treated as a filesystem path
PathLike = Union[str, bytes, os.PathLike]
6
1
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate UpperCamelCase_ = trt.Logger(trt.Logger.WARNING) UpperCamelCase_ = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) UpperCamelCase_ = logging.getLogger(__name__) UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--onnx_model_path""", default=None, type=str, required=True, help="""Path to ONNX model: """, ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model checkpoints and predictions will be written.""", ) # Other parameters parser.add_argument( """--tokenizer_name""", default="""""", type=str, required=True, help="""Pretrained tokenizer name or path if not the same as model_name""", ) parser.add_argument( """--version_2_with_negative""", action="""store_true""", help="""If true, the SQuAD examples contain some that do not have an answer.""", ) parser.add_argument( """--null_score_diff_threshold""", type=float, default=0.0, help="""If null_score - best_non_null is greater than the threshold predict null.""", ) parser.add_argument( """--max_seq_length""", default=3_84, type=int, help=( """The maximum total input sequence length after WordPiece tokenization. Sequences """ """longer than this will be truncated, and sequences shorter than this will be padded.""" ), ) parser.add_argument( """--doc_stride""", default=1_28, type=int, help="""When splitting up a long document into chunks, how much stride to take between chunks.""", ) parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""") parser.add_argument( """--n_best_size""", default=20, type=int, help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""", ) parser.add_argument( """--max_answer_length""", default=30, type=int, help=( """The maximum length of an answer that can be generated. 
This is needed because the start """ """and end predictions are not conditioned on one another.""" ), ) parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""") parser.add_argument( """--dataset_name""", type=str, default=None, required=True, help="""The name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--dataset_config_name""", type=str, default=None, help="""The configuration name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data.""" ) parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""") parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision instead of 32-bit""", ) parser.add_argument( """--int8""", action="""store_true""", help="""Whether to use INT8""", ) UpperCamelCase_ = parser.parse_args() if args.tokenizer_name: UpperCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) logger.info("""Training/evaluation parameters %s""", args) UpperCamelCase_ = args.per_device_eval_batch_size UpperCamelCase_ = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties UpperCamelCase_ = True UpperCamelCase_ = """temp_engine/bert-fp32.engine""" if args.fpaa: UpperCamelCase_ = """temp_engine/bert-fp16.engine""" if args.inta: UpperCamelCase_ = """temp_engine/bert-int8.engine""" # import ONNX file if not os.path.exists("""temp_engine"""): os.makedirs("""temp_engine""") UpperCamelCase_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, """rb""") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network UpperCamelCase_ = [network.get_input(i) for i in range(network.num_inputs)] UpperCamelCase_ = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: UpperCamelCase_ = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) UpperCamelCase_ = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) UpperCamelCase_ = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, """wb""") as f: f.write(engine.serialize()) def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] ) -> int: _lowerCAmelCase : List[str] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) _lowerCAmelCase : Union[str, Any] = 
np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) _lowerCAmelCase : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowerCamelCase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowerCamelCase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowerCamelCase ) # start time _lowerCAmelCase : str = time.time() # Run inference context.execute_async( bindings=[int(_lowerCamelCase ) for d_inp in d_inputs] + [int(_lowerCamelCase ), int(_lowerCamelCase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Synchronize the stream and take time stream.synchronize() # end time _lowerCAmelCase : int = time.time() _lowerCAmelCase : int = end_time - start_time _lowerCAmelCase : Dict = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. UpperCamelCase_ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. UpperCamelCase_ = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("""Evaluation requires a dataset name""") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. UpperCamelCase_ = raw_datasets["""validation"""].column_names UpperCamelCase_ = """question""" if """question""" in column_names else column_names[0] UpperCamelCase_ = """context""" if """context""" in column_names else column_names[1] UpperCamelCase_ = """answers""" if """answers""" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
UpperCamelCase_ = tokenizer.padding_side == """right""" if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the' F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) UpperCamelCase_ = min(args.max_seq_length, tokenizer.model_max_length) def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Any: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace _lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. _lowerCAmelCase : Optional[Any] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=_lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. _lowerCAmelCase : int = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. _lowerCAmelCase : int = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). _lowerCAmelCase : List[Any] = tokenized_examples.sequence_ids(_lowerCamelCase ) _lowerCAmelCase : Dict = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. _lowerCAmelCase : Optional[Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
_lowerCAmelCase : str = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples UpperCamelCase_ = raw_datasets["""validation"""] # Validation Feature Creation UpperCamelCase_ = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="""Running tokenizer on validation dataset""", ) UpperCamelCase_ = default_data_collator UpperCamelCase_ = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""]) UpperCamelCase_ = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int]="eval" ) -> int: # Post-processing: we match the start logits and end logits to answers in the original context. _lowerCAmelCase : Optional[int] = postprocess_qa_predictions( examples=_lowerCamelCase , features=_lowerCamelCase , predictions=_lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowerCamelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: _lowerCAmelCase : int = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: _lowerCAmelCase : int = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] _lowerCAmelCase : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=_lowerCamelCase , label_ids=_lowerCamelCase ) UpperCamelCase_ = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""") # Evaluation! logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path) with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _UpperCAmelCase ( _lowerCamelCase : Dict ) -> int: return trt.volume(engine.get_binding_shape(_lowerCamelCase ) ) * engine.get_binding_dtype(_lowerCamelCase ).itemsize # Allocate device memory for inputs and outputs. UpperCamelCase_ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer UpperCamelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) UpperCamelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) UpperCamelCase_ = cuda.mem_alloc(h_outputa.nbytes) UpperCamelCase_ = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
UpperCamelCase_ = cuda.Stream() # Evaluation logger.info("""***** Running Evaluation *****""") logger.info(F' Num examples = {len(eval_dataset)}') logger.info(F' Batch size = {args.per_device_eval_batch_size}') UpperCamelCase_ = 0.0 UpperCamelCase_ = 0 UpperCamelCase_ = timeit.default_timer() UpperCamelCase_ = None for step, batch in enumerate(eval_dataloader): UpperCamelCase_ , UpperCamelCase_ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 UpperCamelCase_ , UpperCamelCase_ = outputs UpperCamelCase_ = torch.tensor(start_logits) UpperCamelCase_ = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered UpperCamelCase_ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00) UpperCamelCase_ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00) UpperCamelCase_ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) UpperCamelCase_ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00) if all_preds is not None: UpperCamelCase_ = nested_truncate(all_preds, len(eval_dataset)) UpperCamelCase_ = timeit.default_timer() - start_time logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 10_00 / niter)) logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 10_00)) logger.info("""Total Number of Inference = %d""", niter) UpperCamelCase_ = post_processing_function(eval_examples, eval_dataset, all_preds) UpperCamelCase_ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'Evaluation metrics: {eval_metric}')
309
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""DPTFeatureExtractor"""] UpperCamelCase_ = ["""DPTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DPTForDepthEstimation""", """DPTForSemanticSegmentation""", """DPTModel""", """DPTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
309
1
from sklearn.metrics import mean_squared_error import datasets A_ : str = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' A_ : int = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n' A_ : Optional[Any] = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a (datasets.Metric ): '''simple docstring''' def __A ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html""" ] , ) def __A ( self ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("""float""" ) ), "references": datasets.Sequence(datasets.Value("""float""" ) ), } else: return { "predictions": datasets.Value("""float""" ), "references": datasets.Value("""float""" ), } def __A ( self , A__ , A__ , A__=None , A__="uniform_average" , A__=True ): A__ : Optional[Any] = mean_squared_error( _a , _a , sample_weight=_a , multioutput=_a , squared=_a ) return {"mse": mse}
350
from typing import Any


def mode(input_list: list) -> list[Any]:
    """
    Return every value that occurs most often in input_list, in sorted order.

    >>> mode([2, 2, 3])
    [2]
    >>> mode([1, 2, 2, 3, 3])
    [2, 3]
    >>> mode([])
    []
    """
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # the highest number of occurrences in the input list
    # Gets the values of the modes: every entry whose count equals the maximum
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
141
0
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'huggingface/informer-tourism-monthly': ( 'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json' ), # See all Informer models at https://huggingface.co/models?filter=informer } class lowercase__ ( __lowerCamelCase ): '''simple docstring''' a : Optional[int] = "informer" a : Tuple = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self, __magic_name__ = None, __magic_name__ = None, __magic_name__ = "student_t", __magic_name__ = "nll", __magic_name__ = 1, __magic_name__ = None, __magic_name__ = "mean", __magic_name__ = 0, __magic_name__ = 0, __magic_name__ = 0, __magic_name__ = 0, __magic_name__ = None, __magic_name__ = None, __magic_name__ = 64, __magic_name__ = 32, __magic_name__ = 32, __magic_name__ = 2, __magic_name__ = 2, __magic_name__ = 2, __magic_name__ = 2, __magic_name__ = True, __magic_name__ = "gelu", __magic_name__ = 0.05, __magic_name__ = 0.1, __magic_name__ = 0.1, __magic_name__ = 0.1, __magic_name__ = 0.1, __magic_name__ = 100, __magic_name__ = 0.02, __magic_name__=True, __magic_name__ = "prob", __magic_name__ = 5, __magic_name__ = True, **__magic_name__, ) -> Optional[int]: """simple docstring""" # time series specific configuration UpperCamelCase__ : List[Any] = prediction_length UpperCamelCase__ : Any = context_length or prediction_length UpperCamelCase__ : Optional[int] = distribution_output UpperCamelCase__ : Union[str, Any] = loss UpperCamelCase__ : Optional[Any] = input_size UpperCamelCase__ : Dict = num_time_features UpperCamelCase__ : Dict = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] UpperCamelCase__ : Optional[int] = scaling UpperCamelCase__ : Any = num_dynamic_real_features UpperCamelCase__ : Optional[int] = num_static_real_features UpperCamelCase__ : Optional[Any] = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(__magic_name__ ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) UpperCamelCase__ : str = cardinality else: UpperCamelCase__ : int = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(__magic_name__ ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) UpperCamelCase__ : Any = embedding_dimension else: UpperCamelCase__ : Union[str, Any] = [min(50, (cat + 1) // 2 ) for cat in self.cardinality] UpperCamelCase__ : Optional[Any] = num_parallel_samples # Transformer architecture configuration UpperCamelCase__ : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features UpperCamelCase__ : Optional[Any] = d_model UpperCamelCase__ : Tuple = encoder_attention_heads UpperCamelCase__ : Any = decoder_attention_heads UpperCamelCase__ : Dict = encoder_ffn_dim UpperCamelCase__ : Optional[Any] = decoder_ffn_dim UpperCamelCase__ : str = encoder_layers UpperCamelCase__ : Optional[int] = decoder_layers UpperCamelCase__ : Optional[Any] = dropout UpperCamelCase__ : List[Any] = attention_dropout UpperCamelCase__ : Any = activation_dropout UpperCamelCase__ : Optional[int] = encoder_layerdrop UpperCamelCase__ : 
Union[str, Any] = decoder_layerdrop UpperCamelCase__ : Tuple = activation_function UpperCamelCase__ : List[str] = init_std UpperCamelCase__ : int = use_cache # Informer UpperCamelCase__ : Optional[int] = attention_type UpperCamelCase__ : Optional[int] = sampling_factor UpperCamelCase__ : Dict = distil super().__init__(is_encoder_decoder=__magic_name__, **__magic_name__ ) @property def UpperCamelCase__ ( self ) -> int: """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
201
def decimal_isolate(number: float, digit_amount: int) -> float:
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
201
1
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva a_ = "" a_ = "" a_ = "" a_ = 1 # (0 is vertical, 1 is horizontal) def a__ ( ) -> None: _A , _A = get_dataset(__lowercase , __lowercase ) print("Processing..." ) _A , _A , _A = update_image_and_anno(__lowercase , __lowercase , __lowercase ) for index, image in enumerate(__lowercase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _A = random_chars(32 ) _A = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0] _A = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(f"""/{file_root}.jpg""" , __lowercase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"""Success {index+1}/{len(__lowercase )} with {file_name}""" ) _A = [] for anno in new_annos[index]: _A = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(__lowercase ) with open(f"""/{file_root}.txt""" , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def a__ ( __lowercase , __lowercase ) -> tuple[list, list]: _A = [] _A = [] for label_file in glob.glob(os.path.join(__lowercase , "*.txt" ) ): _A = label_file.split(os.sep )[-1].rsplit("." , 1 )[0] with open(__lowercase ) as in_file: _A = in_file.readlines() _A = os.path.join(__lowercase , f"""{label_name}.jpg""" ) _A = [] for obj_list in obj_lists: _A = obj_list.rstrip("\n" ).split(" " ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowercase ) labels.append(__lowercase ) return img_paths, labels def a__ ( __lowercase , __lowercase , __lowercase = 1 ) -> tuple[list, list, list]: _A = [] _A = [] _A = [] for idx in range(len(__lowercase ) ): _A = [] _A = img_list[idx] path_list.append(__lowercase ) _A = anno_list[idx] _A = cva.imread(__lowercase ) if flip_type == 1: _A = cva.flip(__lowercase , __lowercase ) for bbox in img_annos: _A = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: _A = cva.flip(__lowercase , __lowercase ) for bbox in img_annos: _A = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowercase ) new_imgs_list.append(__lowercase ) return new_imgs_list, new_annos_lists, path_list def a__ ( __lowercase = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" _A = ascii_lowercase + digits return "".join(random.choice(__lowercase ) for _ in range(__lowercase ) ) if __name__ == "__main__": main() print("DONE ✅")
163
"""simple docstring""" import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def a__ ( __lowercase , __lowercase ) -> Dict: _A = old_name if "patch_embed" in old_name: _A , _A , _A = old_name.split("." ) if layer == "0": _A = old_name.replace("0" , "convolution1" ) elif layer == "1": _A = old_name.replace("1" , "batchnorm_before" ) elif layer == "3": _A = old_name.replace("3" , "convolution2" ) else: _A = old_name.replace("4" , "batchnorm_after" ) if "network" in old_name and re.search(R"\d\.\d" , __lowercase ): _A = R"\b\d{2}\b" if bool(re.search(__lowercase , __lowercase ) ): _A = re.search(R"\d\.\d\d." , __lowercase ).group() else: _A = re.search(R"\d\.\d." , __lowercase ).group() if int(match[0] ) < 6: _A = old_name.replace(__lowercase , "" ) _A = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] ) _A = "intermediate_stages." + trimmed_name else: _A = old_name.replace(__lowercase , "" ) if int(match[2] ) < num_meta4D_last_stage: _A = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] ) else: _A = str(int(match[2] ) - num_meta4D_last_stage ) _A = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index ) if "norm1" in old_name: _A = trimmed_name.replace("norm1" , "layernorm1" ) elif "norm2" in old_name: _A = trimmed_name.replace("norm2" , "layernorm2" ) elif "fc1" in old_name: _A = trimmed_name.replace("fc1" , "linear_in" ) elif "fc2" in old_name: _A = trimmed_name.replace("fc2" , "linear_out" ) _A = "last_stage." + trimmed_name elif "network" in old_name and re.search(R".\d." , __lowercase ): _A = old_name.replace("network" , "intermediate_stages" ) if "fc" in new_name: _A = new_name.replace("fc" , "convolution" ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): _A = new_name.replace("norm1" , "batchnorm_before" ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): _A = new_name.replace("norm2" , "batchnorm_after" ) if "proj" in new_name: _A = new_name.replace("proj" , "projection" ) if "dist_head" in new_name: _A = new_name.replace("dist_head" , "distillation_classifier" ) elif "head" in new_name: _A = new_name.replace("head" , "classifier" ) elif "patch_embed" in new_name: _A = "efficientformer." + new_name elif new_name == "norm.weight" or new_name == "norm.bias": _A = new_name.replace("norm" , "layernorm" ) _A = "efficientformer." + new_name else: _A = "efficientformer.encoder." + new_name return new_name def a__ ( __lowercase , __lowercase ) -> List[str]: for key in checkpoint.copy().keys(): _A = checkpoint.pop(__lowercase ) _A = val return checkpoint def a__ ( ) -> Dict: _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return image def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str: _A = torch.load(__lowercase , map_location="cpu" )["model"] _A = EfficientFormerConfig.from_json_file(__lowercase ) _A = EfficientFormerForImageClassificationWithTeacher(__lowercase ) _A = "_".join(checkpoint_path.split("/" )[-1].split("." 
)[0].split("_" )[:-1] ) _A = config.depths[-1] - config.num_metaad_blocks + 1 _A = convert_torch_checkpoint(__lowercase , __lowercase ) model.load_state_dict(__lowercase ) model.eval() _A = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } # prepare image _A = prepare_img() _A = 256 _A = 224 _A = EfficientFormerImageProcessor( size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , ) _A = processor(images=__lowercase , return_tensors="pt" ).pixel_values # original processing pipeline _A = Compose( [ Resize(__lowercase , interpolation=pillow_resamplings["bicubic"] ), CenterCrop(__lowercase ), ToTensor(), Normalize(__lowercase , __lowercase ), ] ) _A = image_transforms(__lowercase ).unsqueeze(0 ) assert torch.allclose(__lowercase , __lowercase ) _A = model(__lowercase ) _A = outputs.logits _A = (1, 1000) if "l1" in model_name: _A = torch.Tensor( [-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] ) assert torch.allclose(logits[0, :10] , __lowercase , atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: _A = torch.Tensor( [-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] ) assert torch.allclose(logits[0, :10] , __lowercase , atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: _A = torch.Tensor( [-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] ) assert logits.shape == expected_shape else: raise ValueError( f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(__lowercase ) print(f"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print("Pushing model to the hub..." ) model.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=__lowercase , ) processor.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=__lowercase , ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to EfficientFormer pytorch checkpoint.", ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for EfficientFormer model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) parser.set_defaults(push_to_hub=True) a_ = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
163
1
"""simple docstring""" import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class snake_case__ : @property def a__ ( self ): return self.get_dummy_input() @property def a__ ( self ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." ) def a__ ( self , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , ): __a = 4 __a = 32 __a = (32, 32) __a = torch.manual_seed(0 ) __a = torch.device(lowerCamelCase ) __a = (batch_size, num_channels) + sizes __a = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase ) __a = {"hidden_states": hidden_states} if include_temb: __a = 128 __a = randn_tensor((batch_size, temb_channels) , generator=lowerCamelCase , device=lowerCamelCase ) if include_res_hidden_states_tuple: __a = torch.manual_seed(1 ) __a = (randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase ),) if include_encoder_hidden_states: __a = floats_tensor((batch_size, 32, 32) ).to(lowerCamelCase ) if include_skip_sample: __a = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCamelCase , device=lowerCamelCase ) return dummy_input def a__ ( self ): __a = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": __a = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) __a = self.dummy_input return init_dict, inputs_dict def a__ ( self , lowerCamelCase ): __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.block_class(**lowerCamelCase ) unet_block.to(lowerCamelCase ) unet_block.eval() with torch.no_grad(): __a = unet_block(**lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ): __a = output[0] self.assertEqual(output.shape , self.output_shape ) __a = output[0, -1, -3:, -3:] __a = torch.tensor(lowerCamelCase ).to(lowerCamelCase ) assert torch_all_close(output_slice.flatten() , lowerCamelCase , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def a__ ( self ): __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.block_class(**lowerCamelCase ) model.to(lowerCamelCase ) model.train() __a = model(**lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ): __a = output[0] __a = torch.device(lowerCamelCase ) __a = randn_tensor(output.shape , device=lowerCamelCase ) __a = torch.nn.functional.mse_loss(lowerCamelCase , lowerCamelCase ) loss.backward()
261
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 ): __a , __a = row, column __a = [[default_value for c in range(lowerCamelCase )] for r in range(lowerCamelCase )] def __str__( self ): __a = F"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier __a = 0 for row_vector in self.array: for obj in row_vector: __a = max(lowerCamelCase , len(str(lowerCamelCase ) ) ) __a = F"%{max_element_length}s" # Make string and return def single_line(lowerCamelCase ) -> str: nonlocal string_format_identifier __a = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self ): return str(self ) def a__ ( self , lowerCamelCase ): if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self , lowerCamelCase , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) __a = value def __add__( self , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] + another[r, c] return result def __neg__( self ): __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = -self[r, c] return result def __sub__( self , lowerCamelCase ): return self + (-another) def __mul__( self , lowerCamelCase ): if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] * another return result elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication assert self.column == another.row __a = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __a = F"Unsupported type given for another ({type(lowerCamelCase )})" raise TypeError(lowerCamelCase ) def a__ ( self ): __a = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] return result def a__ ( self , lowerCamelCase , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __a = v.transpose() __a = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _lowerCamelCase( ): # a^(-1) __a = Matrix(3 , 3 , 0 ) for i in range(3 ): __a = 1 print(F"a^(-1) is {ainv}" ) # u, v __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 1, 2, -3 __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 4, -2, 5 print(F"u is {u}" ) print(F"v is {v}" ) print(F"uv^T is {u * v.transpose()}" ) # Sherman Morrison print(F"(a + uv^T)^(-1) is 
{ainv.sherman_morrison(a , a )}" ) def _lowerCamelCase( ): import doctest doctest.testmod() testa()
261
1
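The sherman_morrison method in the Matrix class above implements the Sherman-Morrison identity for a rank-one update of a known inverse: (A + uv^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u). Note that the matrix the method is called on plays the role of A^(-1), and it returns None when the denominator 1 + v^T A^(-1) u vanishes, since the updated matrix is then singular. A quick NumPy sanity check of the identity (a sketch for illustration only; it assumes NumPy is available and is not part of the dataset row):

import numpy as np

ainv = np.eye(3)                          # take A = I, so A^(-1) = I
u = np.array([[1.0], [2.0], [-3.0]])      # column vector, as in the demo
v = np.array([[4.0], [-2.0], [5.0]])      # column vector, as in the demo
factor = 1.0 + (v.T @ ainv @ u)[0, 0]     # 1 + v^T A^(-1) u
updated = ainv - (ainv @ u) @ (v.T @ ainv) / factor
# Compare against a direct inverse of the rank-one-updated matrix
assert np.allclose(updated, np.linalg.inv(np.eye(3) + u @ v.T))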
'''simple docstring''' import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case__ = """▁""" snake_case__ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = BertGenerationTokenizer _lowerCAmelCase = False _lowerCAmelCase = True def _a ( self : Dict ): """simple docstring""" super().setUp() A_ : Optional[int] = BertGenerationTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : Tuple ): """simple docstring""" A_ : str = '''<s>''' A_ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(_lowerCamelCase ) , 1002 ) def _a ( self : Dict ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def _a ( self : Tuple ): """simple docstring""" A_ : int = BertGenerationTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) A_ : List[Any] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] , ) A_ : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) A_ : List[Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) A_ : List[Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def _a ( self : List[str] ): """simple docstring""" return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) @slow def _a ( self : str ): """simple docstring""" A_ : List[str] = '''Hello World!''' A_ : Any = [18536, 2260, 101] self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def _a ( self : int ): """simple docstring""" A_ : Any = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? 
( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) A_ : List[str] = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, ] self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @require_torch @slow def _a ( self : Optional[int] ): """simple docstring""" import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence A_ : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] A_ : Dict = ''' '''.join(_lowerCamelCase ) A_ : List[Any] = self.big_tokenizer.encode_plus(_lowerCamelCase , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase ) A_ : Optional[int] = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase ) A_ : List[Any] = BertGenerationConfig() A_ : List[Any] = BertGenerationEncoder(_lowerCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_lowerCamelCase ) model(**_lowerCamelCase ) @slow def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = {'''input_ids''': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
4
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple 
docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : 
Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase 
) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
1
from __future__ import annotations from cmath import sqrt def __lowerCAmelCase ( a__ , a__ , a__ ) -> tuple[complex, complex]: if a == 0: raise ValueError('''Coefficient \'a\' must not be zero.''' ) __a = b * b - 4 * a * c __a = (-b + sqrt(a__ )) / (2 * a) __a = (-b - sqrt(a__ )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def __lowerCAmelCase ( ) -> Tuple: __a , __a = quadratic_roots(a=5 , b=6 , c=1 ) print(F"""The solutions are: {solutiona} and {solutiona}""" ) if __name__ == "__main__": main()
6
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel A : int = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class __A( unittest.TestCase ): @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]: '''simple docstring''' __a = TOKEN HfFolder.save_token(_snake_case ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __a = FlaxBertModel(_snake_case ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __a = FlaxBertModel(_snake_case ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) def __lowerCAmelCase ( a__ , a__ ) -> str: __a = True __a = 
flatten_dict(modela.params ) __a = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: __a = False return models_are_equal @require_flax class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) __a = FlaxBertModel(_snake_case ) __a = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case ) ) with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertTrue(check_models_equal(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) __a = FlaxBertModel(_snake_case ) __a = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' ) with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertTrue(check_models_equal(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = '''bert''' __a = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertIsNotNone(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = '''bert''' __a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertIsNotNone(_snake_case )
6
1
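The quadratic_roots function above is a direct transcription of the quadratic formula; because it uses cmath.sqrt, a negative discriminant yields a pair of complex roots instead of raising an error. For ax^2 + bx + c = 0 with a != 0, the roots are x = (-b +/- sqrt(b^2 - 4ac)) / (2a). With the demo values a=5, b=6, c=1 the discriminant is 36 - 20 = 16, so the roots are (-6 + 4)/10 = -0.2 and (-6 - 4)/10 = -1.0.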
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "edbeeching/decision-transformer-gym-hopper-medium": ( "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = 'decision_transformer' __UpperCAmelCase : int = ['past_key_values'] __UpperCAmelCase : Optional[int] = { 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _a=17 , _a=4 , _a=128 , _a=4_096 , _a=True , _a=1 , _a=1_024 , _a=3 , _a=1 , _a=None , _a="relu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1E-5 , _a=0.02 , _a=True , _a=True , _a=50_256 , _a=50_256 , _a=False , _a=False , **_a , ): __a = state_dim __a = act_dim __a = hidden_size __a = max_ep_len __a = action_tanh __a = vocab_size __a = n_positions __a = n_layer __a = n_head __a = n_inner __a = activation_function __a = resid_pdrop __a = embd_pdrop __a = attn_pdrop __a = layer_norm_epsilon __a = initializer_range __a = scale_attn_weights __a = use_cache __a = scale_attn_by_inverse_layer_idx __a = reorder_and_upcast_attn __a = bos_token_id __a = eos_token_id super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
368
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def lowercase ( lowerCAmelCase__ : Dict ) -> Optional[int]: __a , __a = image.size __a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) __a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0 __a = image[None].transpose(0 , 3 , 1 , 2 ) __a = torch.from_numpy(lowerCAmelCase__ ) return 2.0 * image - 1.0 class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , _a , _a , _a , ): super().__init__() self.register_modules(vqvae=_a , unet=_a , scheduler=_a ) @torch.no_grad() def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ): if isinstance(_a , PIL.Image.Image ): __a = 1 elif isinstance(_a , torch.Tensor ): __a = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' ) if isinstance(_a , PIL.Image.Image ): __a = preprocess(_a ) __a , __a = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __a = (batch_size, self.unet.config.in_channels // 2, height, width) __a = next(self.unet.parameters() ).dtype __a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a ) __a = image.to(device=self.device , dtype=_a ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_a , device=self.device ) __a = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __a = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __a = {} if accepts_eta: __a = eta for t in self.progress_bar(_a ): # concat latents and low resolution image in the channel dimension. __a = torch.cat([latents, image] , dim=1 ) __a = self.scheduler.scale_model_input(_a , _a ) # predict the noise residual __a = self.unet(_a , _a ).sample # compute the previous noisy sample x_t -> x_t-1 __a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample # decode the image latents with the VQVAE __a = self.vqvae.decode(_a ).sample __a = torch.clamp(_a , -1.0 , 1.0 ) __a = image / 2 + 0.5 __a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __a = self.numpy_to_pil(_a ) if not return_dict: return (image,) return ImagePipelineOutput(images=_a )
11
0
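The superresolution pipeline above conditions the UNet by concatenating the noisy latents with the preprocessed low-resolution image along the channel axis, which is why its inline comment expects in_channels to be 6 (3 latent channels plus 3 image channels). A minimal shape sketch of that step (the tensor sizes are illustrative, not taken from any checkpoint):

import torch

latents = torch.randn(1, 3, 256, 256)   # noisy latents
low_res = torch.randn(1, 3, 256, 256)   # preprocessed low-resolution image
unet_input = torch.cat([latents, low_res], dim=1)
assert unet_input.shape == (1, 6, 256, 256)  # so in_channels // 2 gives the latent channels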
import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Initialise PyTorch model lowerCamelCase_ = FunnelConfig.from_json_file(lowerCamelCase__ ) print(F'Building PyTorch model from configuration: {config}' ) lowerCamelCase_ = FunnelBaseModel(lowerCamelCase__ ) if base_model else FunnelModel(lowerCamelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_funnel(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __A =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.''' ) __A =parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
19
'''simple docstring''' import math class lowerCAmelCase : def snake_case ( self : Optional[int] , __lowercase : list[list[float]] , __lowercase : list[int] ): """simple docstring""" __lowercase =0.0 __lowercase =0.0 for i in range(len(__lowercase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def snake_case ( self : Union[str, Any] , __lowercase : list[list[int | float]] , __lowercase : list[int] , __lowercase : int , __lowercase : float ): """simple docstring""" for i in range(len(__lowercase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCamelCase ( ): '''simple docstring''' __lowercase =[[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) __lowercase =[[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training __lowercase =SelfOrganizingMap() __lowercase =3 __lowercase =0.5 for _ in range(lowercase__ ): for j in range(len(lowercase__ ) ): # training sample __lowercase =training_samples[j] # Compute the winning vector __lowercase =self_organizing_map.get_winner(lowercase__, lowercase__ ) # Update the winning vector __lowercase =self_organizing_map.update(lowercase__, lowercase__, lowercase__, lowercase__ ) # classify test sample __lowercase =[0, 0, 0, 1] __lowercase =self_organizing_map.get_winner(lowercase__, lowercase__ ) # results print(F'''Clusters that the test sample belongs to : {winner}''' ) print(F'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
141
0
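The self-organizing map above uses plain competitive learning: the winner is the weight vector with the smaller sum of squared distances to the sample, and only the winner is pulled toward the sample with the update w_j <- w_j + alpha * (x_i - w_j), which is exactly the loop in the update method. The demo runs three epochs with alpha = 0.5, so each update moves the winning vector halfway toward the current sample.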
from __future__ import annotations import queue class _SCREAMING_SNAKE_CASE : def __init__( self , lowercase ) -> Union[str, Any]: lowerCamelCase_ = data lowerCamelCase_ = None lowerCamelCase_ = None def lowerCamelCase_ ( ): print("\n********Press N to stop entering at any point of time********\n" ) lowerCamelCase_ = input("Enter the value of the root node: " ).strip().lower() lowerCamelCase_ = queue.Queue() lowerCamelCase_ = TreeNode(int(lowerCamelCase__ ) ) q.put(lowerCamelCase__ ) while not q.empty(): lowerCamelCase_ = q.get() lowerCamelCase_ = F'Enter the left node of {node_found.data}: ' lowerCamelCase_ = input(lowerCamelCase__ ).strip().lower() or "n" if check == "n": return tree_node lowerCamelCase_ = TreeNode(int(lowerCamelCase__ ) ) lowerCamelCase_ = left_node q.put(lowerCamelCase__ ) lowerCamelCase_ = F'Enter the right node of {node_found.data}: ' lowerCamelCase_ = input(lowerCamelCase__ ).strip().lower() or "n" if check == "n": return tree_node lowerCamelCase_ = TreeNode(int(lowerCamelCase__ ) ) lowerCamelCase_ = right_node q.put(lowerCamelCase__ ) raise def lowerCamelCase_ ( lowerCamelCase__ ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not node: return print(node.data , end="," ) pre_order(node.left ) pre_order(node.right ) def lowerCamelCase_ ( lowerCamelCase__ ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not node: return in_order(node.left ) print(node.data , end="," ) in_order(node.right ) def lowerCamelCase_ ( lowerCamelCase__ ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end="," ) def lowerCamelCase_ ( lowerCamelCase__ ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not node: return lowerCamelCase_ = queue.Queue() q.put(lowerCamelCase__ ) while not q.empty(): lowerCamelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowerCamelCase_ ( lowerCamelCase__ ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not node: return lowerCamelCase_ = queue.Queue() q.put(lowerCamelCase__ ) while not q.empty(): lowerCamelCase_ = [] while not q.empty(): lowerCamelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(lowerCamelCase__ ) def lowerCamelCase_ ( lowerCamelCase__ ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not node: return lowerCamelCase_ = [] lowerCamelCase_ = node while n or stack: while n: # start from root node, find its left child print(n.data , end="," ) stack.append(lowerCamelCase__ ) lowerCamelCase_ = n.left # end of while means current node doesn't have left child lowerCamelCase_ = stack.pop() # start to traverse its right child lowerCamelCase_ = n.right def lowerCamelCase_ ( lowerCamelCase__ ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not node: return lowerCamelCase_ = [] lowerCamelCase_ = node while n or stack: while n: stack.append(lowerCamelCase__ ) lowerCamelCase_ = n.left lowerCamelCase_ = stack.pop() print(n.data , end="," ) lowerCamelCase_ = n.right def lowerCamelCase_ ( lowerCamelCase__ ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not node: return lowerCamelCase_ , lowerCamelCase_ = [], [] lowerCamelCase_ = node stacka.append(lowerCamelCase__ ) while stacka: # to find the reversed order of post order, 
store it in stack2 lowerCamelCase_ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(lowerCamelCase__ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end="," ) def lowerCamelCase_ ( lowerCamelCase__ = "" , lowerCamelCase__=5_0 , lowerCamelCase__="*" ): if not s: return "\n" + width * char lowerCamelCase_ , lowerCamelCase_ = divmod(width - len(lowerCamelCase__ ) - 2 , 2 ) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt('''Binary Tree Traversals''')) __A =build_tree() print(prompt('''Pre Order Traversal''')) pre_order(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal''')) in_order(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal''')) post_order(node) print(prompt() + '''\n''') print(prompt('''Level Order Traversal''')) level_order(node) print(prompt() + '''\n''') print(prompt('''Actual Level Order Traversal''')) level_order_actual(node) print('''*''' * 5_0 + '''\n''') print(prompt('''Pre Order Traversal - Iteration Version''')) pre_order_iter(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal - Iteration Version''')) in_order_iter(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal - Iteration Version''')) post_order_iter(node) print(prompt())
354
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() __A =logging.get_logger(__name__) __A ={ '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } __A =[ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): for attribute in key.split("." ): lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: lowerCamelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": lowerCamelCase_ = value elif weight_type == "weight_g": lowerCamelCase_ = value elif weight_type == "weight_v": lowerCamelCase_ = value elif weight_type == "bias": lowerCamelCase_ = value else: lowerCamelCase_ = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = [] lowerCamelCase_ = fairseq_model.state_dict() lowerCamelCase_ = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase_ = False if "conv_layers" in name: load_conv_layer( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == "group" , ) lowerCamelCase_ = True else: for key, mapped_key in MAPPING.items(): lowerCamelCase_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key): # special case since naming is very similar continue lowerCamelCase_ = True if "*" in mapped_key: lowerCamelCase_ = name.split(lowerCamelCase__ )[0].split("." 
)[-2] lowerCamelCase_ = mapped_key.replace("*" , lowerCamelCase__ ) if "weight_g" in name: lowerCamelCase_ = "weight_g" elif "weight_v" in name: lowerCamelCase_ = "weight_v" elif "bias" in name: lowerCamelCase_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase_ = "weight" else: lowerCamelCase_ = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(F'Unused weights: {unused_weights}' ) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = full_name.split("conv_layers." )[-1] lowerCamelCase_ = name.split("." ) lowerCamelCase_ = int(items[0] ) lowerCamelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) lowerCamelCase_ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) lowerCamelCase_ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' ) lowerCamelCase_ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) lowerCamelCase_ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(lowerCamelCase__ ) @torch.no_grad() def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ): if config_path is not None: lowerCamelCase_ = UniSpeechSatConfig.from_pretrained(lowerCamelCase__ ) else: lowerCamelCase_ = UniSpeechSatConfig() lowerCamelCase_ = "" if is_finetuned: lowerCamelCase_ = UniSpeechSatForCTC(lowerCamelCase__ ) else: lowerCamelCase_ = UniSpeechSatForPreTraining(lowerCamelCase__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) lowerCamelCase_ = model[0].eval() recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ ) hf_wavavec.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": __A =argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __A =parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
47
0
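The final iterative traversal above uses the classic two-stack trick for post-order: visiting nodes in mirrored pre-order (root, then right, then left) and pushing each visited node onto a second stack means popping that stack emits left, right, root. A self-contained sketch of the same idea (the _Node class and function name here are stand-ins for illustration, not part of the original file):

class _Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def post_order_two_stacks(root):
    # Collect a mirrored pre-order (root, right, left), then reverse it.
    if root is None:
        return []
    work, out = [root], []
    while work:
        node = work.pop()
        out.append(node.data)
        if node.left:
            work.append(node.left)
        if node.right:
            work.append(node.right)
    return out[::-1]  # left, right, root == post-order

assert post_order_two_stacks(_Node(1, _Node(2), _Node(3))) == [2, 3, 1]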
'''simple docstring''' from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None ): if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release: # old versions of hfh don't url-encode the file path UpperCAmelCase__ : Optional[int] = quote(UpperCamelCase__ ) return hfh.hf_hub_url(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" , revision=UpperCamelCase__ )
163
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _UpperCamelCase ( UpperCamelCase__ ): return x + 2 class _snake_case ( unittest.TestCase ): def snake_case__ ( self): UpperCAmelCase__ : List[str] = """x = 3""" UpperCAmelCase__ : Dict = {} UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase) assert result == 3 self.assertDictEqual(_lowerCamelCase , {"""x""": 3}) UpperCAmelCase__ : Optional[int] = """x = y""" UpperCAmelCase__ : Optional[Any] = {"""y""": 5} UpperCAmelCase__ : Dict = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_lowerCamelCase , {"""x""": 5, """y""": 5}) def snake_case__ ( self): UpperCAmelCase__ : Any = """y = add_two(x)""" UpperCAmelCase__ : Optional[Any] = {"""x""": 3} UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase) assert result == 5 self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5}) # Won't work without the tool with CaptureStdout() as out: UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase) assert result is None assert "tried to execute add_two" in out.out def snake_case__ ( self): UpperCAmelCase__ : Union[str, Any] = """x = 3""" UpperCAmelCase__ : Dict = {} UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase) assert result == 3 self.assertDictEqual(_lowerCamelCase , {"""x""": 3}) def snake_case__ ( self): UpperCAmelCase__ : Union[str, Any] = """test_dict = {'x': x, 'y': add_two(x)}""" UpperCAmelCase__ : Any = {"""x""": 3} UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase) self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5}) self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}}) def snake_case__ ( self): UpperCAmelCase__ : List[Any] = """x = 3\ny = 5""" UpperCAmelCase__ : Union[str, Any] = {} UpperCAmelCase__ : List[Any] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5}) def snake_case__ ( self): UpperCAmelCase__ : Dict = """text = f'This is x: {x}.'""" UpperCAmelCase__ : str = {"""x""": 3} UpperCAmelCase__ : Optional[Any] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """text""": """This is x: 3."""}) def snake_case__ ( self): UpperCAmelCase__ : Union[str, Any] = """if x <= 3:\n y = 2\nelse:\n y = 5""" UpperCAmelCase__ : Optional[Any] = {"""x""": 3} UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 2}) UpperCAmelCase__ : Optional[int] = {"""x""": 8} UpperCAmelCase__ : int = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_lowerCamelCase , {"""x""": 8, """y""": 5}) def snake_case__ ( self): UpperCAmelCase__ : Union[str, Any] = """test_list = [x, add_two(x)]""" UpperCAmelCase__ : int = {"""x""": 3} UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase) self.assertListEqual(_lowerCamelCase , [3, 5]) self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_list""": [3, 5]}) def snake_case__ ( self): UpperCAmelCase__ : Tuple = """y = x""" UpperCAmelCase__ : Optional[Any] = {"""x""": 3} UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase) assert result == 3 self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 3}) def snake_case__ ( self): UpperCAmelCase__ : List[str] = """test_list = [x, add_two(x)]\ntest_list[1]""" UpperCAmelCase__ : Union[str, Any] = {"""x""": 3} UpperCAmelCase__ : int = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase) assert result == 5 self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_list""": [3, 5]}) UpperCAmelCase__ : List[str] = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" UpperCAmelCase__ : Any = {"""x""": 3} UpperCAmelCase__ : Dict = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase) assert result == 5 self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}}) def snake_case__ ( self): UpperCAmelCase__ : Optional[int] = """x = 0\nfor i in range(3):\n x = i""" UpperCAmelCase__ : str = {} UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""range""": range} , state=_lowerCamelCase) assert result == 2 self.assertDictEqual(_lowerCamelCase , {"""x""": 2, """i""": 2})
163
1
'''simple docstring''' from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. UpperCamelCase = 10 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> int: for i in range(__lowercase , __lowercase ): if array[i] == target: return i return -1 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int: A: str = 0 A: List[Any] = len(__lowercase ) while left <= right: if right - left < precision: return lin_search(__lowercase , __lowercase , __lowercase , __lowercase ) A: Dict = (left + right) // 3 + 1 A: Dict = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: A: Dict = one_third - 1 elif array[two_third] < target: A: List[str] = two_third + 1 else: A: Optional[Any] = one_third + 1 A: Optional[int] = two_third - 1 else: return -1 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> int: if left < right: if right - left < precision: return lin_search(__lowercase , __lowercase , __lowercase , __lowercase ) A: Dict = (left + right) // 3 + 1 A: str = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(__lowercase , one_third - 1 , __lowercase , __lowercase ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , __lowercase , __lowercase , __lowercase ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , __lowercase , __lowercase ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase = input('''Enter numbers separated by comma:\n''').strip() UpperCamelCase = [int(item.strip()) for item in user_input.split(''',''')] assert collection == sorted(collection), f"List must be ordered.\n{collection}." UpperCamelCase = int(input('''Enter the number to be found in the list:\n''').strip()) UpperCamelCase = ite_ternary_search(collection, target) UpperCamelCase = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(f'Iterative search: {target} found at positions: {resulta}') print(f'Recursive search: {target} found at positions: {resulta}') else: print('''Not found''')
366
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger() @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : List[nn.Module] = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : list = field(default_factory=UpperCAmelCase_ ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Tensor ) -> int: '''simple docstring''' A: List[str] = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(SCREAMING_SNAKE_CASE_ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Dict: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(SCREAMING_SNAKE_CASE_ ) [x.remove() for x in self.handles] return self @property def _snake_case ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' return list(filter(lambda SCREAMING_SNAKE_CASE_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : nn.Module UpperCamelCase_ : int = 0 UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Tensor ) -> Optional[Any]: '''simple docstring''' A: Dict = Tracker(self.dest )(SCREAMING_SNAKE_CASE_ ).parametrized A: Tuple = Tracker(self.src )(SCREAMING_SNAKE_CASE_ ).parametrized A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.src_skip , SCREAMING_SNAKE_CASE_ ) ) A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.dest_skip , SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise Exception( f"""Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE_ )} operations while""" f""" destination module has {len(SCREAMING_SNAKE_CASE_ )}.""" ) for dest_m, src_m in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transferred from={src_m} to={dest_m}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase = True ) -> Any: print(F"""Converting {name}...""" ) with torch.no_grad(): A: Union[str, Any] = timm.create_model(__lowercase , pretrained=__lowercase ).eval() A: List[str] = ResNetForImageClassification(__lowercase ).eval() A: int = ModuleTransfer(src=__lowercase , dest=__lowercase ) A: List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) ) module_transfer(__lowercase ) assert torch.allclose(from_model(__lowercase ) , our_model(__lowercase ).logits ), "The model logits don't match the original one."
A: str = F"""resnet{'-'.join(name.split('resnet' ) )}""" print(__lowercase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__lowercase , ) # we can use the convnext one A: Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__lowercase , ) print(F"""Pushed {checkpoint_name}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = None , __lowercase = True ) -> List[Any]: A: Union[str, Any] = '''imagenet-1k-id2label.json''' A: Union[str, Any] = 1_0_0_0 A: Optional[int] = (1, num_labels) A: Dict = '''huggingface/label-files''' A: Any = num_labels A: Union[str, Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Optional[int] = {int(__lowercase ): v for k, v in idalabel.items()} A: Optional[int] = idalabel A: List[str] = {v: k for k, v in idalabel.items()} A: str = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase ) A: Optional[Any] = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), } if model_name: convert_weight_and_push(__lowercase , names_to_config[model_name] , __lowercase , __lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__lowercase , __lowercase , __lowercase , __lowercase ) return config, expected_shape if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
334
0
'''simple docstring''' import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __snake_case ="""▁""" __snake_case =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): lowerCamelCase : int = BertGenerationTokenizer lowerCamelCase : Union[str, Any] = False lowerCamelCase : Dict = True def __UpperCAmelCase ( self : int ) -> int: super().setUp() lowerCAmelCase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : str ) -> List[Any]: lowerCAmelCase = '<s>' lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[Any] ) -> Tuple: lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(UpperCAmelCase__ ) , 1_0_0_2 ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def __UpperCAmelCase ( self : str ) -> Optional[int]: lowerCAmelCase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) lowerCAmelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def __UpperCAmelCase ( self : Tuple ) -> int: return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) @slow def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: lowerCAmelCase = 'Hello World!' lowerCAmelCase = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def __UpperCAmelCase ( self : Dict ) -> int: lowerCAmelCase = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) lowerCAmelCase = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @require_torch @slow def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCAmelCase = ' '.join(UpperCAmelCase__ ) lowerCAmelCase = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='pt' , return_token_type_ids=UpperCAmelCase__ ) lowerCAmelCase = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=UpperCAmelCase__ ) lowerCAmelCase = BertGenerationConfig() lowerCAmelCase = BertGenerationEncoder(UpperCAmelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase__ ) model(**UpperCAmelCase__ ) @slow def __UpperCAmelCase ( self : Dict ) -> List[str]: # fmt: off lowerCAmelCase = {'input_ids': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
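A hedged usage sketch for the checkpoint exercised by these tests (requires network access plus the transformers and sentencepiece packages):

# Load the tokenizer the integration tests above target and tokenize a sample.
from transformers import BertGenerationTokenizer

tok = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
print(tok.tokenize("This is a test"))
print(tok.encode("Hello World!"))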
4
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __snake_case =logging.get_logger(__name__) def a_ ( lowerCamelCase : Any ): lowerCAmelCase = OrderedDict() for key, value in state_dict.items(): if key.startswith('module.encoder' ): lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' ) if key.startswith('module.decoder' ): lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )] lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' ) if "norm" in key: lowerCAmelCase = key.replace('norm' , 'layer_norm' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )] lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' ) if "layer_norm1" in key: lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase = key[key.find('block' ) + len('block' )] lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' ) if "attn.q" in key: lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowerCAmelCase = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowerCAmelCase = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowerCAmelCase = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowerCAmelCase = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )] lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' ) if "bot_conv" in key: lowerCAmelCase = key.replace('bot_conv' , '0.convolution' ) if "skip_conv1" in key: lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' ) if "skip_conv2" in key: lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' ) if "fusion1" in key: lowerCAmelCase = key.replace('fusion1' , '1.fusion' ) if "fusion2" in key: lowerCAmelCase = key.replace('fusion2' , '2.fusion' ) if "fusion3" in key: lowerCAmelCase = key.replace('fusion3' , '3.fusion' ) if "fusion" in key and "conv" in key: lowerCAmelCase = key.replace('conv' , 'convolutional_layer' ) if key.startswith('module.last_layer_depth' ): lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' ) lowerCAmelCase = value return new_state_dict def a_ ( lowerCamelCase : List[str] , lowerCamelCase : str ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase = 
state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowerCAmelCase = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase = kv_bias[config.hidden_sizes[i] :] def a_ ( ): lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return image @torch.no_grad() def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ): lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) lowerCAmelCase = GLPNImageProcessor() # prepare image lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values logger.info('Converting model...' ) # load original state dict lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) ) # rename keys lowerCAmelCase = rename_keys(lowerCamelCase ) # key and value matrices need special treatment read_in_k_v(lowerCamelCase , lowerCamelCase ) # create HuggingFace model and load state dict lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() # forward pass lowerCAmelCase = model(lowerCamelCase ) lowerCAmelCase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase = torch.tensor( [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] ) elif "kitti" in model_name: lowerCAmelCase = torch.tensor( [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] ) else: raise ValueError(f'''Unknown model name: {model_name}''' ) lowerCAmelCase = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 ) print('Looks ok!' ) # finally, push to hub if required if push_to_hub: logger.info('Pushing model and image processor to the hub...' ) model.push_to_hub( repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , ) if __name__ == "__main__": __snake_case =argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) __snake_case =parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
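A hedged inference sketch with a converted GLPN checkpoint; the hub id vinvino02/glpn-kitti is an assumption, so substitute your own --pytorch_dump_folder_path output if needed:

# Run depth estimation with a converted checkpoint on the same COCO image
# the conversion script uses for verification.
import requests
import torch
from PIL import Image
from transformers import GLPNForDepthEstimation, GLPNImageProcessor

image = Image.open(
    requests.get(
        "http://images.cocodataset.org/val2017/000000039769.jpg", stream=True
    ).raw
)
processor = GLPNImageProcessor()
model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    depth = model(**inputs).predicted_depth
print(depth.shape)  # (1, H, W) depth map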
4
1
'''simple docstring'''
from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()}\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
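The ordering property this script verifies comes from how DistributedSampler shards a dataset: each rank sees an interleaved slice, and the Trainer must re-interleave gathered predictions back into dataset order. A standalone sketch of that sharding follows; the world size and ranks are simulated, so no process group is needed:

# Show the interleaved shards two ranks would receive for the 101-element dataset.
from torch.utils.data.distributed import DistributedSampler

for rank in range(2):
    sampler = DistributedSampler(range(101), num_replicas=2, rank=rank, shuffle=False)
    print(rank, list(sampler)[:5])  # rank 0 -> [0, 2, 4, 6, 8], rank 1 -> [1, 3, 5, 7, 9]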
72
'''simple docstring'''
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], xa: float, xb: float) -> float:
    # Secant-method root finding: iterate until two successive estimates
    # differ by less than 1e-5.
    x_n = xa
    x_na = xb
    while True:
        if x_n == x_na or function(x_na) == function(x_n):
            raise ZeroDivisionError('''float division by zero, could not find root''')
        # secant step: subtract f(x_{n+1}) divided by the slope of the secant line
        x_nb = x_na - (
            function(x_na) / ((function(x_na) - function(x_n)) / (x_na - x_n))
        )
        if abs(x_nb - x_na) < 10**-5:
            return x_nb
        x_n = x_na
        x_na = x_nb


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
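A quick hedged check of the secant iteration above on a second function; the cube-root example is illustrative and not part of the original file:

# The iteration should converge to the real root of x^3 - 27, i.e. 3.0.
def cube(x: float) -> float:
    return x**3 - 27.0

print(intersection(cube, 1.0, 5.0))  # ~3.0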
72
1
'''simple docstring''' import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _A : str =logging.get_logger(__name__) _A : str ={ '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _A : Dict =[ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: for attribute in key.split(""".""" ): lowerCamelCase__ : Union[str, Any] = getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: lowerCamelCase__ : Optional[int] = getattr(UpperCamelCase , UpperCamelCase ).shape else: lowerCamelCase__ : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCamelCase__ : Optional[Any] = value elif weight_type == "weight_g": lowerCamelCase__ : str = value elif weight_type == "weight_v": lowerCamelCase__ : Any = value elif weight_type == "bias": lowerCamelCase__ : str = value else: lowerCamelCase__ : Any = value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int: lowerCamelCase__ : int = [] lowerCamelCase__ : Optional[Any] = fairseq_model.state_dict() lowerCamelCase__ : Dict = hf_model.feature_extractor lowerCamelCase__ : List[str] = hf_model.adapter for name, value in fairseq_dict.items(): lowerCamelCase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , ) lowerCamelCase__ : Tuple = True elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ): load_adapter(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : List[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowerCamelCase__ : Dict = True if "*" in mapped_key: lowerCamelCase__ : Dict = name.split(UpperCamelCase )[0].split(""".""" )[-2] lowerCamelCase__ : Any = mapped_key.replace("""*""" , UpperCamelCase ) if "weight_g" in name: lowerCamelCase__ : Optional[int] = """weight_g""" elif "weight_v" in name: lowerCamelCase__ : Any = """weight_v""" elif "bias" in name: lowerCamelCase__ : List[str] = """bias""" elif "weight" in name: lowerCamelCase__ : int = """weight""" else: lowerCamelCase__ : Optional[Any] = None set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: lowerCamelCase__ : Any = full_name.split("""conv_layers.""" )[-1] lowerCamelCase__ : int = name.split(""".""" ) lowerCamelCase__ : List[Any] = int(items[0] ) lowerCamelCase__ : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowerCamelCase__ : Union[str, Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowerCamelCase__ : List[Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) lowerCamelCase__ : List[Any] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowerCamelCase__ : Dict = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: lowerCamelCase__ : Union[str, Any] = full_name.split("""adaptor.""" )[-1] lowerCamelCase__ : Optional[int] = name.split(""".""" ) if items[1].isdigit(): lowerCamelCase__ : Dict = int(items[1] ) else: lowerCamelCase__ : str = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.''' lowerCamelCase__ : List[Any] = value logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.''' lowerCamelCase__ : Dict = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.''' lowerCamelCase__ : Tuple = value logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.''' lowerCamelCase__ : Union[str, Any] = value logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' ) elif isinstance(UpperCamelCase , UpperCamelCase ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.''' lowerCamelCase__ : Any = value logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.''' lowerCamelCase__ : str = value logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = emb.weight.shape lowerCamelCase__ : int = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) lowerCamelCase__ : str = emb.weight.data return lin_layer @torch.no_grad() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Union[str, Any]: lowerCamelCase__ : Optional[Any] = WavaVecaConfig.from_pretrained( UpperCamelCase , add_adapter=UpperCamelCase , adapter_stride=UpperCamelCase , 
adapter_kernel_size=UpperCamelCase , use_auth_token=UpperCamelCase , output_hidden_size=UpperCamelCase , ) lowerCamelCase__ : Dict = MBartConfig.from_pretrained(UpperCamelCase ) # load model lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ """config_yaml""": config_yaml_path, """data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path, """load_pretrained_decoder_from""": None, } , ) lowerCamelCase__ : Union[str, Any] = model[0].eval() # load feature extractor lowerCamelCase__ : Any = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase , use_auth_token=UpperCamelCase ) # set weights for wav2vec2 encoder lowerCamelCase__ : int = WavaVecaModel(UpperCamelCase ) recursively_load_weights_wavaveca(model.encoder , UpperCamelCase ) # load decoder weights lowerCamelCase__ : int = MBartForCausalLM(UpperCamelCase ) lowerCamelCase__ , lowerCamelCase__ : Dict = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase ) logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) lowerCamelCase__ : List[str] = SpeechEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase ) lowerCamelCase__ : Any = False lowerCamelCase__ : Any = MBartaaTokenizer(UpperCamelCase ) tokenizer.save_pretrained(UpperCamelCase ) lowerCamelCase__ : List[str] = hf_wavavec.config.to_dict() lowerCamelCase__ : Union[str, Any] = tokenizer.pad_token_id lowerCamelCase__ : Dict = tokenizer.bos_token_id lowerCamelCase__ : List[str] = tokenizer.eos_token_id lowerCamelCase__ : Tuple = """mbart50""" lowerCamelCase__ : int = """wav2vec2""" lowerCamelCase__ : Any = tokenizer.eos_token_id lowerCamelCase__ : List[Any] = 250004 lowerCamelCase__ : Dict = tokenizer.eos_token_id lowerCamelCase__ : Tuple = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase ) hf_wavavec.save_pretrained(UpperCamelCase ) feature_extractor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _A : Optional[Any] =argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-xls-r-1b''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/mbart-large-50-one-to-many-mmt''', type=str, help='''Path to hf decoder checkpoint config''', ) parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''') parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''') parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''') parser.add_argument('''--encoder_output_dim''', default=1_024, type=int, help='''encoder output dim''') parser.add_argument('''--start_token_id''', default=250_004, type=int, 
help='''`decoder_start_token_id` of model config''') _A : Optional[Any] =parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
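A hedged inference sketch for the exported speech-translation model; the export directory is a placeholder for the --pytorch_dump_folder_path used above, and the raw waveform is random placeholder audio rather than real speech:

# Load the exported encoder-decoder model and run generation on fake audio.
import torch
from transformers import MBart50Tokenizer, SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

export_dir = "path/to/pytorch_dump_folder"  # the --pytorch_dump_folder_path from above
model = SpeechEncoderDecoderModel.from_pretrained(export_dir)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(export_dir)
tokenizer = MBart50Tokenizer.from_pretrained(export_dir)

waveform = torch.randn(16_000)  # one second of placeholder 16 kHz audio
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
generated_ids = model.generate(inputs.input_values)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))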
41
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'facebook/mbart-large-en-ro': 10_24, 'facebook/mbart-large-cc25': 10_24, } # fmt: off lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = MBartTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it _A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) _A : Union[str, Any] = vocab_file _A : int = False if not self.vocab_file else True _A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "en_XX" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : List[str] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : str = src_lang _A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Dict = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding: _A : Any = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : int = self.convert_tokens_to_ids(__lowerCamelCase) _A : int = [] _A : List[str] = [self.eos_token_id, self.cur_lang_code] _A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : str = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase) _A : List[Any] = [] _A : str = [self.eos_token_id, self.cur_lang_code] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def 
_lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : int = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
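A hedged usage sketch of the language-code handling implemented above, via the public MBartTokenizerFast API; the sample sentence is illustrative:

# For MBart, the source sequence is suffixed with </s> followed by the
# source language code, as set_src_lang_special_tokens above arranges.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
enc = tok("UN Chief Says There Is No Military Solution in Syria")
print(tok.convert_ids_to_tokens(enc.input_ids)[-2:])  # ['</s>', 'en_XX']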
11
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A_ = logging.get_logger(__name__) A_ = '''▁''' A_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A_ = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A_ = {'''vinai/bartpho-syllable''': 10_24} class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] def __init__( self: int, a_: str, a_: Optional[int], a_: List[str]="<s>", a_: List[Any]="</s>", a_: Dict="</s>", a_: Union[str, Any]="<s>", a_: Optional[int]="<unk>", a_: Optional[Any]="<pad>", a_: str="<mask>", a_: Optional[Dict[str, Any]] = None, **a_: int, ): '''simple docstring''' _snake_case : Optional[Any] = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else mask_token _snake_case : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=a_, eos_token=a_, unk_token=a_, sep_token=a_, cls_token=a_, pad_token=a_, mask_token=a_, sp_model_kwargs=self.sp_model_kwargs, **a_, ) _snake_case : Any = vocab_file _snake_case : List[Any] = monolingual_vocab_file _snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(a_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _snake_case : Tuple = {} _snake_case : Union[str, Any] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(a_ ) not in self.fairseq_tokens_to_ids: _snake_case : Optional[int] = cnt cnt += 1 with open(a_, """r""", encoding="""utf-8""" ) as f: for line in f.readlines(): _snake_case : Union[str, Any] = line.strip().split()[0] _snake_case : Tuple = len(self.fairseq_tokens_to_ids ) if str(a_ ) not in self.fairseq_tokens_to_ids: _snake_case : str = len(self.fairseq_tokens_to_ids ) _snake_case : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self: Optional[int] ): '''simple docstring''' _snake_case : Tuple = self.__dict__.copy() _snake_case : List[Any] = None _snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self: Optional[Any], a_: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = d # for backward compatibility if not hasattr(self, """sp_model_kwargs""" ): _snake_case : Optional[int] = {} _snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCamelCase_ ( self: Union[str, Any], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case : str = [self.cls_token_id] _snake_case : Optional[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self: Dict, a_: List[int], a_: Optional[List[int]] = None, a_: bool = False ): '''simple docstring''' if already_has_special_tokens: return 
super().get_special_tokens_mask( token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ ) if token_ids_a is None: return [1] + ([0] * len(a_ )) + [1] return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1] def UpperCamelCase_ ( self: Union[str, Any], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : Union[str, Any] = [self.sep_token_id] _snake_case : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self: int ): '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Tuple = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self: Tuple, a_: str ): '''simple docstring''' return self.sp_model.encode(a_, out_type=a_ ) def UpperCamelCase_ ( self: Optional[int], a_: List[str] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def UpperCamelCase_ ( self: List[str], a_: List[str] ): '''simple docstring''' return self.fairseq_ids_to_tokens[index] def UpperCamelCase_ ( self: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : Optional[int] = """""".join(a_ ).replace(a_, """ """ ).strip() return out_string def UpperCamelCase_ ( self: List[str], a_: str, a_: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(a_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _snake_case : Any = os.path.join( a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _snake_case : Any = os.path.join( a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""], ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, a_ ) elif not os.path.isfile(self.vocab_file ): with open(a_, """wb""" ) as fi: _snake_case : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(a_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( a_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file, a_ ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(a_, """w""", encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(a_ )} \n" ) return out_vocab_file, out_monolingual_vocab_file
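A hedged usage sketch for the tokenizer above through its public transformers name, BartphoTokenizer; the Vietnamese sample sentence is illustrative:

# Tokenize and round-trip a sentence with the BartPho syllable tokenizer.
from transformers import BartphoTokenizer

tok = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tok("Chúng tôi là những nghiên cứu viên.")["input_ids"]
print(tok.convert_ids_to_tokens(ids))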
132
"""simple docstring""" import torch from torch import nn class lowercase( nn.Module ): '''simple docstring''' def __init__( self: Any, a_: List[str], a_: Union[str, Any], a_: int, a_: int, a_: List[Any]=1, a_: Union[str, Any]=False ): '''simple docstring''' super().__init__() _snake_case : int = n_token _snake_case : Tuple = d_embed _snake_case : List[str] = d_proj _snake_case : Optional[int] = cutoffs + [n_token] _snake_case : Any = [0] + self.cutoffs _snake_case : Tuple = div_val _snake_case : Optional[int] = self.cutoffs[0] _snake_case : Union[str, Any] = len(self.cutoffs ) - 1 _snake_case : Union[str, Any] = self.shortlist_size + self.n_clusters if self.n_clusters > 0: _snake_case : List[Any] = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed ) ) _snake_case : Tuple = nn.Parameter(torch.zeros(self.n_clusters ) ) _snake_case : Any = nn.ModuleList() _snake_case : Optional[Any] = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(a_, a_ ) ) ) else: self.out_projs.append(a_ ) self.out_layers.append(nn.Linear(a_, a_ ) ) else: for i in range(len(self.cutoffs ) ): _snake_case , _snake_case : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] _snake_case : Union[str, Any] = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(a_, a_ ) ) ) self.out_layers.append(nn.Linear(a_, r_idx - l_idx ) ) _snake_case : Optional[int] = keep_order def UpperCamelCase_ ( self: str, a_: Union[str, Any], a_: Dict, a_: int, a_: Tuple ): '''simple docstring''' if proj is None: _snake_case : List[Any] = nn.functional.linear(a_, a_, bias=a_ ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: _snake_case : List[Any] = nn.functional.linear(a_, proj.t().contiguous() ) _snake_case : Tuple = nn.functional.linear(a_, a_, bias=a_ ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def UpperCamelCase_ ( self: Dict, a_: Dict, a_: str=None, a_: Union[str, Any]=False ): '''simple docstring''' if labels is not None: # Shift so that tokens < n predict n _snake_case : int = hidden[..., :-1, :].contiguous() _snake_case : List[Any] = labels[..., 1:].contiguous() _snake_case : str = hidden.view(-1, hidden.size(-1 ) ) _snake_case : Dict = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" ) else: _snake_case : int = hidden.view(-1, hidden.size(-1 ) ) if self.n_clusters == 0: _snake_case : Tuple = self._compute_logit(a_, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] ) if labels is not None: _snake_case : Dict = labels != -100 _snake_case : str = torch.zeros_like(a_, dtype=hidden.dtype, device=hidden.device ) _snake_case : str = ( -nn.functional.log_softmax(a_, dim=-1 )[mask].gather(1, labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: _snake_case : Optional[int] = nn.functional.log_softmax(a_, dim=-1 ) else: # construct weights and biases _snake_case , _snake_case : Optional[int] = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: _snake_case , _snake_case : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] _snake_case : List[Any] = self.out_layers[0].weight[l_idx:r_idx] _snake_case : Tuple = self.out_layers[0].bias[l_idx:r_idx] else: _snake_case : Optional[int] = self.out_layers[i].weight _snake_case : int = self.out_layers[i].bias if i == 0: _snake_case : List[str] = 
torch.cat([weight_i, self.cluster_weight], dim=0 ) _snake_case : int = torch.cat([bias_i, self.cluster_bias], dim=0 ) weights.append(a_ ) biases.append(a_ ) _snake_case , _snake_case , _snake_case : Any = weights[0], biases[0], self.out_projs[0] _snake_case : List[str] = self._compute_logit(a_, a_, a_, a_ ) _snake_case : Union[str, Any] = nn.functional.log_softmax(a_, dim=1 ) if labels is None: _snake_case : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: _snake_case : Dict = torch.zeros_like(a_, dtype=hidden.dtype, device=hidden.device ) _snake_case : Union[str, Any] = 0 _snake_case : Optional[Any] = [0] + self.cutoffs for i in range(len(a_ ) - 1 ): _snake_case , _snake_case : Dict = cutoff_values[i], cutoff_values[i + 1] if labels is not None: _snake_case : Dict = (labels >= l_idx) & (labels < r_idx) _snake_case : int = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue _snake_case : List[str] = labels.index_select(0, a_ ) - l_idx _snake_case : List[str] = head_logprob.index_select(0, a_ ) _snake_case : List[str] = hidden.index_select(0, a_ ) else: _snake_case : List[str] = hidden if i == 0: if labels is not None: _snake_case : Dict = head_logprob_i.gather(1, target_i[:, None] ).squeeze(1 ) else: _snake_case : Optional[Any] = head_logprob[:, : self.cutoffs[0]] else: _snake_case , _snake_case , _snake_case : Dict = weights[i], biases[i], self.out_projs[i] _snake_case : int = self._compute_logit(a_, a_, a_, a_ ) _snake_case : int = nn.functional.log_softmax(a_, dim=1 ) _snake_case : Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: _snake_case : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1, target_i[:, None] ).squeeze(1 ) else: _snake_case : Optional[Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i _snake_case : Any = logprob_i if labels is not None: if (hasattr(self, """keep_order""" ) and self.keep_order) or keep_order: out.index_copy_(0, a_, -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[int] ): '''simple docstring''' if self.n_clusters == 0: _snake_case : Optional[int] = self._compute_logit(a_, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] ) return nn.functional.log_softmax(a_, dim=-1 ) else: # construct weights and biases _snake_case , _snake_case : List[Any] = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: _snake_case , _snake_case : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] _snake_case : Optional[int] = self.out_layers[0].weight[l_idx:r_idx] _snake_case : str = self.out_layers[0].bias[l_idx:r_idx] else: _snake_case : List[Any] = self.out_layers[i].weight _snake_case : Union[str, Any] = self.out_layers[i].bias if i == 0: _snake_case : int = torch.cat([weight_i, self.cluster_weight], dim=0 ) _snake_case : Dict = torch.cat([bias_i, self.cluster_bias], dim=0 ) weights.append(a_ ) biases.append(a_ ) _snake_case , _snake_case , _snake_case : int = weights[0], biases[0], self.out_projs[0] _snake_case : List[Any] = self._compute_logit(a_, a_, a_, a_ ) _snake_case : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) ) _snake_case : int = nn.functional.log_softmax(a_, dim=1 ) _snake_case : List[Any] = [0] + self.cutoffs for i in range(len(a_ ) - 1 ): _snake_case , _snake_case : List[str] = cutoff_values[i], cutoff_values[i + 1] if i == 0: _snake_case : List[str] = 
head_logprob[:, : self.cutoffs[0]] else: _snake_case , _snake_case , _snake_case : List[Any] = weights[i], biases[i], self.out_projs[i] _snake_case : Optional[int] = self._compute_logit(a_, a_, a_, a_ ) _snake_case : Any = nn.functional.log_softmax(a_, dim=1 ) _snake_case : Dict = head_logprob[:, -i] + tail_logprob_i _snake_case : Any = logprob_i return out
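# Usage sketch (illustrative only; the sizes below are assumptions, not from the original file):
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500])
    hidden = torch.randn(4, 16, 64)  # [bsz, seq_len, d_proj]
    labels = torch.randint(0, 1000, (4, 16))  # next-token ids
    nll = crit(hidden, labels)  # negative log-likelihood per shifted position
    print(nll.shape)  # torch.Size([60]) == (seq_len - 1) * bsz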
132
1
"""simple docstring""" from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder __UpperCamelCase = datasets.utils.logging.get_logger(__name__) class UpperCamelCase ( folder_based_builder.FolderBasedBuilderConfig ): SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None class UpperCamelCase ( folder_based_builder.FolderBasedBuilder ): SCREAMING_SNAKE_CASE_ = datasets.Audio() SCREAMING_SNAKE_CASE_ = "audio" SCREAMING_SNAKE_CASE_ = AudioFolderConfig SCREAMING_SNAKE_CASE_ = 42 # definition at the bottom of the script SCREAMING_SNAKE_CASE_ = AudioClassification(audio_column="audio" , label_column="label" ) __UpperCamelCase = [ '''.aiff''', '''.au''', '''.avr''', '''.caf''', '''.flac''', '''.htk''', '''.svx''', '''.mat4''', '''.mat5''', '''.mpc2k''', '''.ogg''', '''.paf''', '''.pvf''', '''.raw''', '''.rf64''', '''.sd2''', '''.sds''', '''.ircam''', '''.voc''', '''.w64''', '''.wav''', '''.nist''', '''.wavex''', '''.wve''', '''.xi''', '''.mp3''', '''.opus''', ] __UpperCamelCase = AUDIO_EXTENSIONS
69
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
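# Instantiation sketch (illustrative; the derived values assume the defaults above):
# config = TimeSeriesTransformerConfig(prediction_length=24)
# config.context_length  -> 24 (falls back to prediction_length)
# config.feature_size    -> 9  (input_size * len(lags_sequence) + the extra embedding/time/loc/scale features)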
47
0
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def snake_case ( A__ ): # vision encoder if "img_encoder.pos_embed" in name: UpperCAmelCase_ : str = name.replace("img_encoder.pos_embed" ,"vision_model.embeddings.position_embeddings" ) if "img_encoder.patch_embed.proj" in name: UpperCAmelCase_ : List[str] = name.replace("img_encoder.patch_embed.proj" ,"vision_model.embeddings.patch_embeddings.projection" ) if "img_encoder.patch_embed.norm" in name: UpperCAmelCase_ : List[Any] = name.replace("img_encoder.patch_embed.norm" ,"vision_model.embeddings.layernorm" ) if "img_encoder.layers" in name: UpperCAmelCase_ : int = name.replace("img_encoder.layers" ,"vision_model.encoder.stages" ) if "blocks" in name and "res" not in name: UpperCAmelCase_ : Tuple = name.replace("blocks" ,"layers" ) if "attn" in name and "pre_assign" not in name: UpperCAmelCase_ : Any = name.replace("attn" ,"self_attn" ) if "proj" in name and "self_attn" in name and "text" not in name: UpperCAmelCase_ : str = name.replace("proj" ,"out_proj" ) if "pre_assign_attn.attn.proj" in name: UpperCAmelCase_ : str = name.replace("pre_assign_attn.attn.proj" ,"pre_assign_attn.attn.out_proj" ) if "norm1" in name: UpperCAmelCase_ : Tuple = name.replace("norm1" ,"layer_norm1" ) if "norm2" in name and "pre_assign" not in name: UpperCAmelCase_ : Dict = name.replace("norm2" ,"layer_norm2" ) if "img_encoder.norm" in name: UpperCAmelCase_ : Optional[Any] = name.replace("img_encoder.norm" ,"vision_model.layernorm" ) # text encoder if "text_encoder.token_embedding" in name: UpperCAmelCase_ : Dict = name.replace("text_encoder.token_embedding" ,"text_model.embeddings.token_embedding" ) if "text_encoder.positional_embedding" in name: UpperCAmelCase_ : Optional[Any] = name.replace("text_encoder.positional_embedding" ,"text_model.embeddings.position_embedding.weight" ) if "text_encoder.transformer.resblocks." in name: UpperCAmelCase_ : Optional[int] = name.replace("text_encoder.transformer.resblocks." ,"text_model.encoder.layers." ) if "ln_1" in name: UpperCAmelCase_ : Dict = name.replace("ln_1" ,"layer_norm1" ) if "ln_2" in name: UpperCAmelCase_ : Optional[int] = name.replace("ln_2" ,"layer_norm2" ) if "c_fc" in name: UpperCAmelCase_ : Optional[Any] = name.replace("c_fc" ,"fc1" ) if "c_proj" in name: UpperCAmelCase_ : Dict = name.replace("c_proj" ,"fc2" ) if "text_encoder" in name: UpperCAmelCase_ : Tuple = name.replace("text_encoder" ,"text_model" ) if "ln_final" in name: UpperCAmelCase_ : List[Any] = name.replace("ln_final" ,"final_layer_norm" ) # projection layers if "img_projector.linear_hidden." in name: UpperCAmelCase_ : Union[str, Any] = name.replace("img_projector.linear_hidden." ,"visual_projection." ) if "img_projector.linear_out." in name: UpperCAmelCase_ : Dict = name.replace("img_projector.linear_out." ,"visual_projection.3." 
) if "text_projector.linear_hidden" in name: UpperCAmelCase_ : List[Any] = name.replace("text_projector.linear_hidden" ,"text_projection" ) if "text_projector.linear_out" in name: UpperCAmelCase_ : Any = name.replace("text_projector.linear_out" ,"text_projection.3" ) return name def snake_case ( A__ ,A__ ): for key in orig_state_dict.copy().keys(): UpperCAmelCase_ : Any = orig_state_dict.pop(A__ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ : str = key.split("." ) UpperCAmelCase_ : Tuple = int(key_split[2] ), int(key_split[4] ) UpperCAmelCase_ : int = config.vision_config.hidden_size if "weight" in key: UpperCAmelCase_ : int = val[:dim, :] UpperCAmelCase_ : Dict = val[dim : dim * 2, :] UpperCAmelCase_ : Optional[int] = val[-dim:, :] else: UpperCAmelCase_ : int = val[:dim] UpperCAmelCase_ : List[Any] = val[dim : dim * 2] UpperCAmelCase_ : Dict = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ : Dict = key.split("." ) UpperCAmelCase_ : List[Any] = int(key_split[3] ) UpperCAmelCase_ : Optional[int] = config.text_config.hidden_size if "weight" in key: UpperCAmelCase_ : Dict = val[:dim, :] UpperCAmelCase_ : List[str] = val[ dim : dim * 2, : ] UpperCAmelCase_ : str = val[-dim:, :] else: UpperCAmelCase_ : Dict = val[:dim] UpperCAmelCase_ : Tuple = val[dim : dim * 2] UpperCAmelCase_ : Optional[int] = val[-dim:] else: UpperCAmelCase_ : List[Any] = rename_key(A__ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): UpperCAmelCase_ : List[str] = val.squeeze_() else: UpperCAmelCase_ : Any = val return orig_state_dict def snake_case ( ): UpperCAmelCase_ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : List[str] = Image.open(requests.get(A__ ,stream=A__ ).raw ) return im @torch.no_grad() def snake_case ( A__ ,A__ ,A__="groupvit-gcc-yfcc" ,A__=False ): UpperCAmelCase_ : List[Any] = GroupViTConfig() UpperCAmelCase_ : Union[str, Any] = GroupViTModel(A__ ).eval() UpperCAmelCase_ : Any = torch.load(A__ ,map_location="cpu" )["model"] UpperCAmelCase_ : Tuple = convert_state_dict(A__ ,A__ ) UpperCAmelCase_ : str = model.load_state_dict(A__ ,strict=A__ ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(A__ ) == 0) # verify result UpperCAmelCase_ : Optional[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" ) UpperCAmelCase_ : Union[str, Any] = prepare_img() UpperCAmelCase_ : int = processor(text=["a photo of a cat", "a photo of a dog"] ,images=A__ ,padding=A__ ,return_tensors="pt" ) with torch.no_grad(): UpperCAmelCase_ : Any = model(**A__ ) if model_name == "groupvit-gcc-yfcc": UpperCAmelCase_ : Tuple = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": UpperCAmelCase_ : Any = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(F"""Model name {model_name} not supported.""" ) assert torch.allclose(outputs.logits_per_image ,A__ ,atol=1e-3 ) processor.save_pretrained(A__ ) model.save_pretrained(A__ ) print("Successfully saved processor and model to" ,A__ ) if push_to_hub: print("Pushing 
to the hub..." ) processor.push_to_hub(A__ ,organization="nielsr" ) model.push_to_hub(A__ ,organization="nielsr" ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gccy-fcc''', type=str, help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) lowerCamelCase_ = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
370
"""simple docstring""" def snake_case ( A__ = 10_00 ): UpperCAmelCase_ : Optional[Any] = 2**power UpperCAmelCase_ : Optional[int] = str(A__ ) UpperCAmelCase_ : Tuple = list(A__ ) UpperCAmelCase_ : Any = 0 for i in list_num: sum_of_num += int(A__ ) return sum_of_num if __name__ == "__main__": lowerCamelCase_ = int(input('''Enter the power of 2: ''').strip()) print('''2 ^ ''', power, ''' = ''', 2**power) lowerCamelCase_ = solution(power) print('''Sum of the digits is: ''', result)
253
0
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler

from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
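# Parsing sketch (illustrative CLI values, not from the original file):
# from transformers import HfArgumentParser
# parser = HfArgumentParser(Seq2SeqTrainingArguments)
# (training_args,) = parser.parse_args_into_dataclasses(["--output_dir", "out", "--sortish_sampler"])
# training_args.lr_scheduler  -> "linear"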
198
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a generation reproduces the target string."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[i], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
334
0
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __A : Union[str, Any] = get_logger() __A : Optional[dict] = None class __A ( TensorFormatter[Mapping, "jax.Array", Mapping] ): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Dict ): super().__init__(features=UpperCAmelCase_ ) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise ValueError( f"Expected {device} to be a `str` not {type(UpperCAmelCase_ )}, as `jaxlib.xla_extension.Device` " 'is not serializable neither with `pickle` nor with `dill`. Instead you can surround ' 'the device with `str()` to get its string identifier that will be internally mapped ' 'to the actual `jaxlib.xla_extension.Device`.' ) lowerCAmelCase : List[Any] = device if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: lowerCAmelCase : Optional[int] = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f"Device with string identifier {self.device} not listed among the available " f"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default " f"device: {str(jax.devices()[0] )}." ) lowerCAmelCase : Any = str(jax.devices()[0] ) lowerCAmelCase : Optional[int] = jnp_array_kwargs @staticmethod def lowercase__ ( ): import jax return {str(UpperCAmelCase_ ): device for device in jax.devices()} def lowercase__ ( self : str , UpperCAmelCase_ : Union[str, Any] ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and column: if all( isinstance(UpperCAmelCase_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(UpperCAmelCase_ , axis=0 ) return column def lowercase__ ( self : Tuple , UpperCAmelCase_ : Tuple ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase_ , (str, bytes, type(UpperCAmelCase_ )) ): return value elif isinstance(UpperCAmelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() lowerCAmelCase : Dict = {} if isinstance(UpperCAmelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: lowerCAmelCase : int = {'dtype': jnp.intaa} else: lowerCAmelCase : str = {'dtype': jnp.intaa} elif isinstance(UpperCAmelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): lowerCAmelCase : Optional[int] = {'dtype': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase_ , PIL.Image.Image ): lowerCAmelCase : List[Any] = np.asarray(UpperCAmelCase_ ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable 
instead global DEVICE_MAPPING if DEVICE_MAPPING is None: lowerCAmelCase : List[Any] = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(UpperCAmelCase_ , **{**default_dtype, **self.jnp_array_kwargs} ) def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase_ , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(UpperCAmelCase_ , '__array__' ) and not isinstance(UpperCAmelCase_ , jax.Array ): lowerCAmelCase : int = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase_ , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase_ ) for substruct in data_struct] ) elif isinstance(UpperCAmelCase_ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCAmelCase_ ) for substruct in data_struct] ) return self._tensorize(UpperCAmelCase_ ) def lowercase__ ( self : int , UpperCAmelCase_ : dict ): return map_nested(self._recursive_tensorize , UpperCAmelCase_ , map_list=UpperCAmelCase_ ) def lowercase__ ( self : int , UpperCAmelCase_ : pa.Table ): lowerCAmelCase : int = self.numpy_arrow_extractor().extract_row(UpperCAmelCase_ ) lowerCAmelCase : List[Any] = self.python_features_decoder.decode_row(UpperCAmelCase_ ) return self.recursive_tensorize(UpperCAmelCase_ ) def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : pa.Table ): lowerCAmelCase : Any = self.numpy_arrow_extractor().extract_column(UpperCAmelCase_ ) lowerCAmelCase : Any = self.python_features_decoder.decode_column(UpperCAmelCase_ , pa_table.column_names[0] ) lowerCAmelCase : Any = self.recursive_tensorize(UpperCAmelCase_ ) lowerCAmelCase : Tuple = self._consolidate(UpperCAmelCase_ ) return column def lowercase__ ( self : List[str] , UpperCAmelCase_ : pa.Table ): lowerCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase_ ) lowerCAmelCase : str = self.python_features_decoder.decode_batch(UpperCAmelCase_ ) lowerCAmelCase : Optional[int] = self.recursive_tensorize(UpperCAmelCase_ ) for column_name in batch: lowerCAmelCase : List[Any] = self._consolidate(batch[column_name] ) return batch
360
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __A : List[Any] = trt.Logger(trt.Logger.WARNING) __A : Optional[Any] = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __A : List[Any] = logging.getLogger(__name__) __A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--onnx_model_path''', default=None, type=str, required=True, help='''Path to ONNX model: ''', ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model checkpoints and predictions will be written.''', ) # Other parameters parser.add_argument( '''--tokenizer_name''', default='''''', type=str, required=True, help='''Pretrained tokenizer name or path if not the same as model_name''', ) parser.add_argument( '''--version_2_with_negative''', action='''store_true''', help='''If true, the SQuAD examples contain some that do not have an answer.''', ) parser.add_argument( '''--null_score_diff_threshold''', type=float, default=0.0, help='''If null_score - best_non_null is greater than the threshold predict null.''', ) parser.add_argument( '''--max_seq_length''', default=384, type=int, help=( '''The maximum total input sequence length after WordPiece tokenization. Sequences ''' '''longer than this will be truncated, and sequences shorter than this will be padded.''' ), ) parser.add_argument( '''--doc_stride''', default=128, type=int, help='''When splitting up a long document into chunks, how much stride to take between chunks.''', ) parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''') parser.add_argument( '''--n_best_size''', default=20, type=int, help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''', ) parser.add_argument( '''--max_answer_length''', default=30, type=int, help=( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ), ) parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''') parser.add_argument( '''--dataset_name''', type=str, default=None, required=True, help='''The name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--dataset_config_name''', type=str, default=None, help='''The configuration name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.''' ) parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''') parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision instead of 32-bit''', ) parser.add_argument( '''--int8''', action='''store_true''', help='''Whether to use INT8''', ) __A : List[str] = parser.parse_args() if args.tokenizer_name: __A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) logger.info('''Training/evaluation parameters %s''', args) __A : List[Any] = args.per_device_eval_batch_size __A : Any = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __A : Any = True __A : Union[str, Any] = '''temp_engine/bert-fp32.engine''' if args.fpaa: __A : List[str] = '''temp_engine/bert-fp16.engine''' if args.inta: __A : Dict = '''temp_engine/bert-int8.engine''' # import ONNX file if not os.path.exists('''temp_engine'''): os.makedirs('''temp_engine''') __A : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, '''rb''') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __A : str = [network.get_input(i) for i in range(network.num_inputs)] __A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __A : Dict = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __A : List[Any] = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __A : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, '''wb''') as f: f.write(engine.serialize()) def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any: '''simple docstring''' lowerCAmelCase : Dict = np.asarray(inputs['input_ids'], dtype=np.intaa ) lowerCAmelCase : Optional[int] = np.asarray(inputs['attention_mask'], dtype=np.intaa ) lowerCAmelCase : Dict = np.asarray(inputs['token_type_ids'], dtype=np.intaa ) # Copy 
inputs cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), _UpperCAmelCase ) cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), _UpperCAmelCase ) cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), _UpperCAmelCase ) # start time lowerCAmelCase : List[Any] = time.time() # Run inference context.execute_async( bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )], stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) # Synchronize the stream and take time stream.synchronize() # end time lowerCAmelCase : List[str] = time.time() lowerCAmelCase : Tuple = end_time - start_time lowerCAmelCase : Union[str, Any] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __A : List[str] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __A : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('''Evaluation requires a dataset name''') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __A : int = raw_datasets['''validation'''].column_names __A : int = '''question''' if '''question''' in column_names else column_names[0] __A : List[str] = '''context''' if '''context''' in column_names else column_names[1] __A : int = '''answers''' if '''answers''' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __A : str = tokenizer.padding_side == '''right''' if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the' F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' 
) __A : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length) def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple: '''simple docstring''' lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. lowerCAmelCase : Union[str, Any] = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=_UpperCAmelCase, stride=args.doc_stride, return_overflowing_tokens=_UpperCAmelCase, return_offsets_mapping=_UpperCAmelCase, padding='max_length', ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. lowerCAmelCase : List[Any] = tokenized_examples.pop('overflow_to_sample_mapping' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. lowerCAmelCase : Tuple = [] for i in range(len(tokenized_examples['input_ids'] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). lowerCAmelCase : Optional[Any] = tokenized_examples.sequence_ids(_UpperCAmelCase ) lowerCAmelCase : Optional[int] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. lowerCAmelCase : List[str] = sample_mapping[i] tokenized_examples["example_id"].append(examples['id'][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. lowerCAmelCase : List[Any] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['offset_mapping'][i] ) ] return tokenized_examples __A : int = raw_datasets['''validation'''] # Validation Feature Creation __A : Any = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) __A : List[str] = default_data_collator __A : int = eval_dataset.remove_columns(['''example_id''', '''offset_mapping''']) __A : Union[str, Any] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int: '''simple docstring''' lowerCAmelCase : str = postprocess_qa_predictions( examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: lowerCAmelCase : Union[str, Any] = [ {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items() ] else: lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()] lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase ) __A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''') # Evaluation! logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path) with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]: '''simple docstring''' return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize # Allocate device memory for inputs and outputs. __A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __A : Tuple = cuda.mem_alloc(h_outputa.nbytes) __A : Tuple = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __A : Union[str, Any] = cuda.Stream() # Evaluation logger.info('''***** Running Evaluation *****''') logger.info(F' Num examples = {len(eval_dataset)}') logger.info(F' Batch size = {args.per_device_eval_batch_size}') __A : Union[str, Any] = 0.0 __A : Optional[Any] = 0 __A : Optional[Any] = timeit.default_timer() __A : Optional[int] = None for step, batch in enumerate(eval_dataloader): __A , __A : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __A , __A : str = outputs __A : Optional[Any] = torch.tensor(start_logits) __A : Any = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __A : str = nested_truncate(all_preds, len(eval_dataset)) __A : Any = timeit.default_timer() - start_time logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter)) logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000)) logger.info('''Total Number of Inference = %d''', niter) __A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds) __A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'Evaluation metrics: {eval_metric}')
323
0
"""simple docstring""" def snake_case_ ( A_ : str ): '''simple docstring''' assert column_title.isupper() _lowerCamelCase : List[Any] = 0 _lowerCamelCase : Tuple = len(A_ ) - 1 _lowerCamelCase : Dict = 0 while index >= 0: _lowerCamelCase : str = (ord(column_title[index] ) - 64) * pow(26, A_ ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
72
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
72
1
import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before any TF import

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
352
from dataclasses import dataclass, field from typing import Optional @dataclass class __a: """simple docstring""" lowerCAmelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} ) lowerCAmelCase = field( default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} ) lowerCAmelCase = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} ) lowerCAmelCase = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size for training.'''} ) lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} ) lowerCAmelCase = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} ) lowerCAmelCase = field( default=1_0000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} ) lowerCAmelCase = field(default=2E-4 , metadata={'''help''': '''Learning rate fo training.'''} ) lowerCAmelCase = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} ) lowerCAmelCase = field( default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} ) lowerCAmelCase = field( default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} ) lowerCAmelCase = field( default=_a , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} ) lowerCAmelCase = field(default=5_0000 , metadata={'''help''': '''Maximum number of training steps.'''} ) lowerCAmelCase = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} ) lowerCAmelCase = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} ) lowerCAmelCase = field(default=1 , metadata={'''help''': '''Training seed.'''} ) lowerCAmelCase = field( default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , ) lowerCAmelCase = field( default=_a , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} ) lowerCAmelCase = field(default=_a , metadata={'''help''': '''If True the data is pretokenized.'''} ) @dataclass class __a: """simple docstring""" lowerCAmelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) lowerCAmelCase = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} ) lowerCAmelCase = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. 
If -1 the full dataset is evaluated.'''} ) lowerCAmelCase = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} ) lowerCAmelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) @dataclass class __a: """simple docstring""" lowerCAmelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) lowerCAmelCase = field(default=_a , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) lowerCAmelCase = field( default=_a , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , ) lowerCAmelCase = field( default=_a , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} ) lowerCAmelCase = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} ) lowerCAmelCase = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} ) lowerCAmelCase = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} ) lowerCAmelCase = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} ) lowerCAmelCase = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} ) lowerCAmelCase = field( default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} ) lowerCAmelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) lowerCAmelCase = field( default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} ) lowerCAmelCase = field( default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} ) lowerCAmelCase = field( default=-1 , metadata={ '''help''': ( '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive''' ''' number corresponds to which GPU device id to run on.''' ) } , ) @dataclass class __a: """simple docstring""" lowerCAmelCase = field( default=_a , metadata={ '''help''': '''The number of CPU cores to use for parallel preprocessing. 
Default uses the maximum available.''' } , ) lowerCAmelCase = field( default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} ) lowerCAmelCase = field( default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} ) lowerCAmelCase = field( default=10_0000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} ) lowerCAmelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) lowerCAmelCase = field( default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} ) lowerCAmelCase = field( default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} ) lowerCAmelCase = field( default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} ) lowerCAmelCase = field( default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} ) lowerCAmelCase = field( default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} ) lowerCAmelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , ) lowerCAmelCase = field( default=_a , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} ) lowerCAmelCase = field( default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} ) @dataclass class __a: """simple docstring""" lowerCAmelCase = field( default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} ) lowerCAmelCase = field( default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} ) lowerCAmelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) lowerCAmelCase = field(default=20_0000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} ) lowerCAmelCase = field( default=3_2768 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} ) lowerCAmelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} ) lowerCAmelCase = field(default=_a , metadata={'''help''': '''Push saved tokenizer to the hub.'''} ) @dataclass class __a: """simple docstring""" lowerCAmelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} ) lowerCAmelCase = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} ) lowerCAmelCase = field( default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} ) lowerCAmelCase = field(default=_a , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) @dataclass class __a: """simple docstring""" lowerCAmelCase = field( default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} ) lowerCAmelCase = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} ) lowerCAmelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} ) lowerCAmelCase = field(default=_a , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
235
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a :Optional[int] = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a :Optional[int] = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys a :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
132
"""simple docstring""" import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration a :Union[str, Any] = 500_000 a ,a :Union[str, Any] = os.path.split(__file__) a :Union[str, Any] = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def _lowercase ( __lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : str = dataset.map(**__lowerCAmelCase ) @get_duration def _lowercase ( __lowerCAmelCase , **__lowerCAmelCase ) -> int: SCREAMING_SNAKE_CASE__ : List[str] = dataset.filter(**__lowerCAmelCase ) def _lowercase ( ) -> str: SCREAMING_SNAKE_CASE__ : str = {"""num examples""": SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE__ : Tuple = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} ) SCREAMING_SNAKE_CASE__ : Any = generate_example_dataset( os.path.join(__lowerCAmelCase , """dataset.arrow""" ) , __lowerCAmelCase , num_examples=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__lowerCAmelCase ) def tokenize(__lowerCAmelCase ): return tokenizer(examples["""text"""] ) SCREAMING_SNAKE_CASE__ : List[str] = map(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = map(__lowerCAmelCase , batched=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase ) with dataset.formatted_as(type="""numpy""" ): SCREAMING_SNAKE_CASE__ : Any = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase ) with dataset.formatted_as(type="""pandas""" ): SCREAMING_SNAKE_CASE__ : Optional[int] = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase ) with dataset.formatted_as(type="""torch""" , columns="""numbers""" ): SCREAMING_SNAKE_CASE__ : Any = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase ) with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ): SCREAMING_SNAKE_CASE__ : int = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = map(__lowerCAmelCase , function=__lowerCAmelCase , batched=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : int = filter(__lowerCAmelCase ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(__lowerCAmelCase , """wb""" ) as f: f.write(json.dumps(__lowerCAmelCase ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ ={"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ =[ """FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FocalNetForImageClassification""", """FocalNetForMaskedImageModeling""", """FocalNetBackbone""", """FocalNetModel""", """FocalNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys UpperCamelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class _a ( unittest.TestCase ): def snake_case ( self : Tuple ) -> Dict: '''simple docstring''' _UpperCamelCase : int = tempfile.mkdtemp() _UpperCamelCase : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] _UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _UpperCamelCase : Dict = { '''do_resize''': True, '''size''': {'''height''': 2_2_4, '''width''': 2_2_4}, '''do_center_crop''': True, '''crop_size''': {'''height''': 1_8, '''width''': 1_8}, '''do_normalize''': True, '''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073], '''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711], '''do_convert_rgb''': True, } _UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname, lowerCAmelCase__ ) with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp: json.dump(lowerCAmelCase__, lowerCAmelCase__ ) def snake_case ( self : str, **lowerCAmelCase__ : List[Any] ) -> Optional[int]: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__ ) def snake_case ( self : Union[str, Any], **lowerCAmelCase__ : Tuple ) -> str: '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__ ) def snake_case ( self : Any, **lowerCAmelCase__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__ ) def snake_case ( self : str ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def snake_case ( self : Any ) -> int: '''simple docstring''' _UpperCamelCase : List[str] = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uinta )] _UpperCamelCase : List[Any] = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1 ) ) for x in image_inputs] return image_inputs def snake_case ( self : str ) -> Any: '''simple docstring''' _UpperCamelCase : Any = self.get_tokenizer() _UpperCamelCase : int = self.get_rust_tokenizer() _UpperCamelCase : int = self.get_image_processor() _UpperCamelCase : Tuple = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) _UpperCamelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase__ ) _UpperCamelCase : List[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) _UpperCamelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() ) 
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase__ ) self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase__ ) self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase__ ) def snake_case ( self : int ) -> Tuple: '''simple docstring''' _UpperCamelCase : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase : Dict = self.get_tokenizer(cls_token='''(CLS)''', sep_token='''(SEP)''' ) _UpperCamelCase : List[str] = self.get_image_processor(do_normalize=lowerCAmelCase__ ) _UpperCamelCase : Optional[Any] = ChineseCLIPProcessor.from_pretrained( self.tmpdirname, cls_token='''(CLS)''', sep_token='''(SEP)''', do_normalize=lowerCAmelCase__ ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, lowerCAmelCase__ ) def snake_case ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase : List[str] = self.get_image_processor() _UpperCamelCase : str = self.get_tokenizer() _UpperCamelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ ) _UpperCamelCase : List[str] = self.prepare_image_inputs() _UpperCamelCase : Any = image_processor(lowerCAmelCase__, return_tensors='''np''' ) _UpperCamelCase : Any = processor(images=lowerCAmelCase__, return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def snake_case ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase : Tuple = self.get_image_processor() _UpperCamelCase : Optional[Any] = self.get_tokenizer() _UpperCamelCase : Any = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ ) _UpperCamelCase : Tuple = '''Alexandra,T-shirt的价格是15便士。''' _UpperCamelCase : List[str] = processor(text=lowerCAmelCase__ ) _UpperCamelCase : Any = tokenizer(lowerCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def snake_case ( self : Dict ) -> Tuple: '''simple docstring''' _UpperCamelCase : Tuple = self.get_image_processor() _UpperCamelCase : Optional[Any] = self.get_tokenizer() _UpperCamelCase : Dict = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ ) _UpperCamelCase : Any = '''Alexandra,T-shirt的价格是15便士。''' _UpperCamelCase : Union[str, Any] = self.prepare_image_inputs() _UpperCamelCase : str = processor(text=lowerCAmelCase__, images=lowerCAmelCase__ ) self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase__ ): processor() def snake_case ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase : int = self.get_image_processor() _UpperCamelCase : int = 
self.get_tokenizer() _UpperCamelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ ) _UpperCamelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCamelCase : List[Any] = processor.batch_decode(lowerCAmelCase__ ) _UpperCamelCase : Dict = tokenizer.batch_decode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ ) def snake_case ( self : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCamelCase : Any = self.get_image_processor() _UpperCamelCase : Optional[int] = self.get_tokenizer() _UpperCamelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ ) _UpperCamelCase : Any = '''Alexandra,T-shirt的价格是15便士。''' _UpperCamelCase : int = self.prepare_image_inputs() _UpperCamelCase : Dict = processor(text=lowerCAmelCase__, images=lowerCAmelCase__ ) self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
# CI utility: extract selected warnings from workflow artifacts
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
# tests for the AudioLDM pipeline
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F

from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
# conversion script: original WavLM checkpoint -> transformers WavLMModel
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./   # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint and rebuild the original model
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# I-BERT configuration
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
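# --- Usage sketch (not part of the original file) ---
# Instantiating the config with integer quantization enabled; assumes a
# standard `transformers` installation exposing `IBertConfig` at top level.
from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="gelu")
print(config.hidden_size)  # 768, per the defaults above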
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from"
    " diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from"
    " diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
# tests for the Pix2Struct image processor
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the fourth channel is dropped on conversion to RGB)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
# Project Euler style: polynomial interpolation via Gaussian elimination
from __future__ import annotations

from collections.abc import Callable


Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system A x = b by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy matrix and vector into a single augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [[round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit a polynomial through the points (1, y_1), (2, y_2), ... and return it as a function."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) for x_val in range(size))

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]

    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
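# --- Sanity check (not part of the original file) ---
# Hypothetical values exercising `solve` above: the system x + y = 3, x - y = 1
# has the solution x = 2, y = 1.
if __name__ == "__main__":
    print(solve([[1, 1], [1, -1]], [[3], [1]]))  # [[2.0], [1.0]]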
# small training/display helpers (descriptive function names chosen here,
# since the originals were obfuscated in the source)
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    # Disable gradient updates for all parameters of the module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    # Display an image without axis ticks.
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
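# --- Usage sketch (not part of the original file) ---
# Exercises the helpers above; the function names are the descriptive ones
# used in this restored snippet.
import torch.nn as nn

if __name__ == "__main__":
    layer = nn.Linear(4, 2)
    freeze_module(layer)
    print(all(not p.requires_grad for p in layer.parameters()))  # True
    print(get_device(), get_timestamp())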
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
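# Usage sketch (kept as comments; the checkpoint id "Intel/dpt-large" is an
# assumption based on the public DPT releases, not taken from this file):
#
# from transformers import DPTImageProcessor
# from PIL import Image
#
# processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
# inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")
# inputs["pixel_values"].shape  # e.g. torch.Size([1, 3, 384, 384])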
235
0
from __future__ import annotations

import math


# for calculating u value
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
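# The loop above evaluates Newton's forward-difference formula
#     f(x) ~= y0 + u*Dy0 + u(u-1)/2! * D^2 y0 + ...
# A minimal non-interactive sketch of the same computation; the helper name
# `interpolate` and the sine-table sample points are illustrative assumptions:
def interpolate(x: list[float], y0: list[float], value: float) -> float:
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i, v in enumerate(y0):
        table[i][0] = v
    # Build the forward-difference table column by column.
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    result = table[0][0]
    for i in range(1, n):
        result += ucal(u, i) * table[0][i] / math.factorial(i)
    return result

# interpolate([45, 50, 55, 60], [0.7071, 0.7660, 0.8192, 0.8660], 52)  # ~0.788, close to sin(52 deg)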
297
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
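# The quantity under the square root is E(z)^2 from the Friedmann equation:
#     H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_L)
# with Omega_k = 1 - (Omega_r + Omega_m + Omega_L). A quick sanity check: at
# z = 0 the curvature term makes E(0) = 1 by construction, so the function
# returns exactly H0 regardless of how the densities are split:
#
# hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
#                  matter_density=0.3, dark_energy=0.7, redshift=0)  # -> 68.3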
297
1
"""simple docstring""" import numpy class __magic_name__ : '''simple docstring''' def __init__( self , _a , _a ): """simple docstring""" lowerCamelCase = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. lowerCamelCase = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. lowerCamelCase = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. lowerCamelCase = numpy.random.rand(3 , 1 ) # Real output values provided. lowerCamelCase = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. lowerCamelCase = numpy.zeros(output_array.shape ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. lowerCamelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. lowerCamelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) lowerCamelCase = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) lowerCamelCase = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" for iteration in range(1 , iterations + 1 ): lowerCamelCase = self.feedforward() self.back_propagation() if give_loss: lowerCamelCase = numpy.mean(numpy.square(output - self.feedforward() ) ) print(f'Iteration {iteration} Loss: {loss}' 
) def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = input_arr lowerCamelCase = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) lowerCamelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) lowerCamelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def a__ ( snake_case__ ) -> Tuple: return 1 / (1 + numpy.exp(-value )) def a__ ( snake_case__ ) -> List[Any]: return (value) * (1 - (value)) def a__ ( ) -> Any: lowerCamelCase = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. lowerCamelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. lowerCamelCase = TwoHiddenLayerNeuralNetwork( input_array=_lowerCAmelCase , output_array=_lowerCAmelCase ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_lowerCAmelCase , iterations=10 , give_loss=_lowerCAmelCase ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
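# Note: `sigmoid_derivative` takes the *activation* s = sigmoid(x) and returns
# s * (1 - s), which equals d(sigmoid)/dx evaluated at x. A quick numerical
# check of that identity (the helper name is ours, not part of the original):
def _check_sigmoid_derivative(x: float = 0.5, eps: float = 1e-6) -> None:
    s = sigmoid(numpy.array(x))
    # Central finite difference of sigmoid at x.
    numeric = (sigmoid(numpy.array(x + eps)) - sigmoid(numpy.array(x - eps))) / (2 * eps)
    assert abs(sigmoid_derivative(s) - numeric) < 1e-8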
291
import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
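# Usage sketch outside the test harness. The checkpoint id comes from the
# tests above; assuming the matching sentencepiece tokenizer loads via
# AutoTokenizer (kept as comments since it downloads weights):
#
# from transformers import AutoTokenizer, BertGenerationEncoder
#
# tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# outputs = model(**tokenizer("Hello, my dog is cute", return_tensors="pt"))
# outputs.last_hidden_state.shape  # -> torch.Size([1, seq_len, 1024])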
128
0
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


# Upstream this module defines one stub per torch-backed diffusers class, each
# with the same three methods raising through `requires_backends` when torch is
# missing. With every class name here reduced to `__a` (and every module-level
# function to `SCREAMING_SNAKE_CASE`), the dozens of repeated definitions would
# simply rebind the same names, so they are equivalent to the single
# representative stub and helper below.
class __a(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def SCREAMING_SNAKE_CASE(*args, **kwargs):
    requires_backends(SCREAMING_SNAKE_CASE, ["torch"])
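# Behaviour sketch: instantiating any of these placeholders without torch
# installed raises an ImportError via `requires_backends` (upstream the stubs
# carry real names such as UNet2DModel; `__a` is what survives here):
#
# try:
#     __a()
# except ImportError as err:
#     print(err)  # message points the user at installing torch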
354
def sylvester(number: int) -> int:
    """Return the number at position `number` in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
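# Since lower * upper + 1 = (s-1)*s + 1, the recurrence is s(1) = 2 and
# s(n) = s(n-1)^2 - s(n-1) + 1, giving 2, 3, 7, 43, 1807, ...
# An equivalent iterative sketch (the helper name is ours):
def sylvester_iterative(number: int) -> int:
    term = 2
    for _ in range(number - 1):
        term = term * term - term + 1
    return term

# sylvester_iterative(8) == sylvester(8)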
196
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
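# Usage sketch. From the shell, this subcommand is reached as
#     accelerate config default --mixed_precision fp16
# and from Python (the `accelerate.utils` re-export is our assumption; the
# function is defined in this module either way):
#
# from accelerate.utils import write_basic_config
# write_basic_config(mixed_precision="fp16")  # writes the default cluster config once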
328
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63,
    90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267,
    1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467,
    4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63,
    90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350,
    1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667,
    6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562,
    13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075,
    21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470,
    36865, 42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
328
1
"""simple docstring""" import math def lowerCamelCase (a_ :int) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(a_) + 1) , 6): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCamelCase (a_ :int = 1_0001) -> int: try: lowercase :Optional[int] = int(a_) except (TypeError, ValueError): raise TypeError('''Parameter nth must be int or castable to int.''') from None if nth <= 0: raise ValueError('''Parameter nth must be greater than or equal to one.''') lowercase :list[int] = [] lowercase :Optional[int] = 2 while len(a_) < nth: if is_prime(a_): primes.append(a_) num += 1 else: num += 1 return primes[len(a_) - 1] if __name__ == "__main__": print(F"""{solution() = }""")
172
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } UpperCAmelCase = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } UpperCAmelCase = {'''facebook/blenderbot-3B''': 128} class __magic_name__ ( __UpperCAmelCase ): __A : Any = VOCAB_FILES_NAMES __A : List[str] = PRETRAINED_VOCAB_FILES_MAP __A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Optional[int] = ["input_ids", "attention_mask"] __A : Optional[Any] = BlenderbotTokenizer def __init__( self : Optional[Any] , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : List[Any]=None , snake_case__ : Dict="replace" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Any="</s>" , snake_case__ : Any="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : str="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : int=False , snake_case__ : List[Any]=True , **snake_case__ : Any , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) lowercase :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , snake_case__ ) != add_prefix_space: lowercase :int = getattr(snake_case__ , pre_tok_state.pop('''type''' ) ) lowercase :List[str] = add_prefix_space lowercase :Any = pre_tok_class(**snake_case__ ) lowercase :Tuple = add_prefix_space lowercase :List[Any] = '''post_processor''' lowercase :Optional[Any] = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: lowercase :int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase :List[Any] = tuple(state['''sep'''] ) if "cls" in state: lowercase :List[str] = tuple(state['''cls'''] ) lowercase :Dict = False if state.get('''add_prefix_space''' , snake_case__ ) != add_prefix_space: lowercase :str = add_prefix_space lowercase :int = True if state.get('''trim_offsets''' , snake_case__ ) != trim_offsets: lowercase :List[str] = trim_offsets lowercase :Optional[Any] = True if changes_to_apply: lowercase :Optional[Any] = getattr(snake_case__ , state.pop('''type''' ) ) lowercase :List[Any] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , 
snake_case__ ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def __snake_case ( self : Dict ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def __snake_case ( self : Dict , snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase :Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value lowercase :List[str] = value def __snake_case ( self : int , *snake_case__ : Optional[int] , **snake_case__ : Tuple ): '''simple docstring''' lowercase :int = kwargs.get('''is_split_into_words''' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def __snake_case ( self : List[Any] , *snake_case__ : Optional[Any] , **snake_case__ : str ): '''simple docstring''' lowercase :int = kwargs.get('''is_split_into_words''' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*snake_case__ , **snake_case__ ) def __snake_case ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' lowercase :Union[str, Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def __snake_case ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' lowercase :Optional[Any] = [self.sep_token_id] lowercase :Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __snake_case ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def __snake_case ( self : List[str] , snake_case__ : "Conversation" ): '''simple docstring''' lowercase :str = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(snake_case__ ) lowercase :Tuple = ''' '''.join(snake_case__ ) lowercase :Optional[int] = self.encode(snake_case__ ) if len(snake_case__ ) > self.model_max_length: lowercase :Optional[int] = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
172
1
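A minimal cross-check of the 6k +/- 1 trial-division test used in the row above, validating it against a plain sieve; this sketch is an illustrative addition (the helper names are my own) and is not part of any dataset row:

import math

def is_prime(number: int) -> bool:
    # every prime > 3 has the form 6k - 1 or 6k + 1, since 6k, 6k + 2, 6k + 4
    # are even and 6k + 3 is divisible by 3
    if 1 < number < 4:
        return True  # 2 and 3
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):  # candidates 6k - 1 and 6k + 1
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def sieve(limit: int) -> list[int]:
    # classic sieve of Eratosthenes, used only as a reference implementation
    flags = [True] * (limit + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(math.sqrt(limit)) + 1):
        if flags[p]:
            for m in range(p * p, limit + 1, p):
                flags[m] = False
    return [n for n, is_p in enumerate(flags) if is_p]

assert [n for n in range(1_000) if is_prime(n)] == sieve(999)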
'''simple docstring'''
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
145
'''simple docstring'''
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    """simple docstring"""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """simple docstring"""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    """simple docstring"""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
145
1
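A worked example of the resonant-frequency formula from the row above, f = 1 / (2 * pi * sqrt(L * C)); this is an illustrative addition with arbitrary component values, not part of any dataset row:

from math import pi, sqrt

inductance = 10e-3    # 10 mH, in henries
capacitance = 100e-9  # 100 nF, in farads
f = 1 / (2 * pi * sqrt(inductance * capacitance))
print(f"{f:.1f} Hz")  # ~5032.9 Hz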
def find_minimum_change(denominations, value):
    """simple docstring"""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(f'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=''' ''')
211
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = OrderedDict( [ # Base model mapping ('''albert''', '''FlaxAlbertModel'''), ('''bart''', '''FlaxBartModel'''), ('''beit''', '''FlaxBeitModel'''), ('''bert''', '''FlaxBertModel'''), ('''big_bird''', '''FlaxBigBirdModel'''), ('''blenderbot''', '''FlaxBlenderbotModel'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''), ('''clip''', '''FlaxCLIPModel'''), ('''distilbert''', '''FlaxDistilBertModel'''), ('''electra''', '''FlaxElectraModel'''), ('''gpt-sw3''', '''FlaxGPT2Model'''), ('''gpt2''', '''FlaxGPT2Model'''), ('''gpt_neo''', '''FlaxGPTNeoModel'''), ('''gptj''', '''FlaxGPTJModel'''), ('''longt5''', '''FlaxLongT5Model'''), ('''marian''', '''FlaxMarianModel'''), ('''mbart''', '''FlaxMBartModel'''), ('''mt5''', '''FlaxMT5Model'''), ('''opt''', '''FlaxOPTModel'''), ('''pegasus''', '''FlaxPegasusModel'''), ('''regnet''', '''FlaxRegNetModel'''), ('''resnet''', '''FlaxResNetModel'''), ('''roberta''', '''FlaxRobertaModel'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''), ('''roformer''', '''FlaxRoFormerModel'''), ('''t5''', '''FlaxT5Model'''), ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''), ('''vit''', '''FlaxViTModel'''), ('''wav2vec2''', '''FlaxWav2Vec2Model'''), ('''whisper''', '''FlaxWhisperModel'''), ('''xglm''', '''FlaxXGLMModel'''), ('''xlm-roberta''', '''FlaxXLMRobertaModel'''), ] ) lowerCamelCase = OrderedDict( [ # Model for pre-training mapping ('''albert''', '''FlaxAlbertForPreTraining'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForPreTraining'''), ('''big_bird''', '''FlaxBigBirdForPreTraining'''), ('''electra''', '''FlaxElectraForPreTraining'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) lowerCamelCase = OrderedDict( [ # Model for Masked LM mapping ('''albert''', '''FlaxAlbertForMaskedLM'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForMaskedLM'''), ('''big_bird''', '''FlaxBigBirdForMaskedLM'''), ('''distilbert''', '''FlaxDistilBertForMaskedLM'''), ('''electra''', '''FlaxElectraForMaskedLM'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) lowerCamelCase = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''), ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''marian''', '''FlaxMarianMTModel'''), ('''mbart''', 
'''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ] ) lowerCamelCase = OrderedDict( [ # Model for Image-classsification ('''beit''', '''FlaxBeitForImageClassification'''), ('''regnet''', '''FlaxRegNetForImageClassification'''), ('''resnet''', '''FlaxResNetForImageClassification'''), ('''vit''', '''FlaxViTForImageClassification'''), ] ) lowerCamelCase = OrderedDict( [ ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''), ] ) lowerCamelCase = OrderedDict( [ # Model for Causal LM mapping ('''bart''', '''FlaxBartForCausalLM'''), ('''bert''', '''FlaxBertForCausalLM'''), ('''big_bird''', '''FlaxBigBirdForCausalLM'''), ('''electra''', '''FlaxElectraForCausalLM'''), ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''), ('''gpt2''', '''FlaxGPT2LMHeadModel'''), ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''), ('''gptj''', '''FlaxGPTJForCausalLM'''), ('''opt''', '''FlaxOPTForCausalLM'''), ('''roberta''', '''FlaxRobertaForCausalLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''), ('''xglm''', '''FlaxXGLMForCausalLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''), ] ) lowerCamelCase = OrderedDict( [ # Model for Sequence Classification mapping ('''albert''', '''FlaxAlbertForSequenceClassification'''), ('''bart''', '''FlaxBartForSequenceClassification'''), ('''bert''', '''FlaxBertForSequenceClassification'''), ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''), ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''), ('''electra''', '''FlaxElectraForSequenceClassification'''), ('''mbart''', '''FlaxMBartForSequenceClassification'''), ('''roberta''', '''FlaxRobertaForSequenceClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''), ('''roformer''', '''FlaxRoFormerForSequenceClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''), ] ) lowerCamelCase = OrderedDict( [ # Model for Question Answering mapping ('''albert''', '''FlaxAlbertForQuestionAnswering'''), ('''bart''', '''FlaxBartForQuestionAnswering'''), ('''bert''', '''FlaxBertForQuestionAnswering'''), ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''), ('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''), ('''electra''', '''FlaxElectraForQuestionAnswering'''), ('''mbart''', '''FlaxMBartForQuestionAnswering'''), ('''roberta''', '''FlaxRobertaForQuestionAnswering'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''), ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''), ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''), ] ) lowerCamelCase = OrderedDict( [ # Model for Token Classification mapping ('''albert''', '''FlaxAlbertForTokenClassification'''), ('''bert''', '''FlaxBertForTokenClassification'''), ('''big_bird''', '''FlaxBigBirdForTokenClassification'''), ('''distilbert''', '''FlaxDistilBertForTokenClassification'''), ('''electra''', '''FlaxElectraForTokenClassification'''), ('''roberta''', '''FlaxRobertaForTokenClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''), ('''roformer''', '''FlaxRoFormerForTokenClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''), ] ) lowerCamelCase = OrderedDict( [ # Model for Multiple Choice mapping ('''albert''', '''FlaxAlbertForMultipleChoice'''), ('''bert''', '''FlaxBertForMultipleChoice'''), 
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''), ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''), ('''electra''', '''FlaxElectraForMultipleChoice'''), ('''roberta''', '''FlaxRobertaForMultipleChoice'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''), ('''roformer''', '''FlaxRoFormerForMultipleChoice'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''), ] ) lowerCamelCase = OrderedDict( [ ('''bert''', '''FlaxBertForNextSentencePrediction'''), ] ) lowerCamelCase = OrderedDict( [ ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ] ) lowerCamelCase = OrderedDict( [ ('''whisper''', '''FlaxWhisperForAudioClassification'''), ] ) lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowerCamelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowerCamelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowerCamelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowerCamelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowerCamelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowerCamelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowerCamelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _a ( _BaseAutoModelClass): _a : List[str] = FLAX_MODEL_MAPPING lowerCamelCase = auto_class_update(FlaxAutoModel) class _a ( _BaseAutoModelClass): _a : Union[str, Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowerCamelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''') class _a ( _BaseAutoModelClass): _a : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowerCamelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''') class _a ( _BaseAutoModelClass): _a : Tuple = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowerCamelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''') class _a ( _BaseAutoModelClass): _a : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCamelCase = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base''' ) class _a ( _BaseAutoModelClass): _a : Union[str, Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCamelCase = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='''sequence classification''' ) class _a ( _BaseAutoModelClass): _a : str = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowerCamelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''') class _a ( 
_BaseAutoModelClass): _a : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='''token classification''' ) class _a ( _BaseAutoModelClass): _a : List[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowerCamelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''') class _a ( _BaseAutoModelClass): _a : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowerCamelCase = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction''' ) class _a ( _BaseAutoModelClass): _a : List[Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase = auto_class_update( FlaxAutoModelForImageClassification, head_doc='''image classification''' ) class _a ( _BaseAutoModelClass): _a : List[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowerCamelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''') class _a ( _BaseAutoModelClass): _a : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowerCamelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling''' )
211
1
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets lowerCAmelCase: Tuple = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n' lowerCAmelCase: Optional[Any] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n' lowerCAmelCase: Optional[int] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. 
`"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__( datasets.Metric ): def lowercase_ ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def lowercase_ ( self : List[str] , __snake_case : List[str] , __snake_case : Dict , __snake_case : str=None , __snake_case : Tuple=True , __snake_case : int=False ): if rouge_types is None: a : Dict = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] a : Any = rouge_scorer.RougeScorer(rouge_types=__snake_case , use_stemmer=__snake_case ) if use_aggregator: a : List[Any] = scoring.BootstrapAggregator() else: a : List[Any] = [] for ref, pred in zip(__snake_case , __snake_case ): a : List[Any] = scorer.score(__snake_case , __snake_case ) if use_aggregator: aggregator.add_scores(__snake_case ) else: scores.append(__snake_case ) if use_aggregator: a : Optional[int] = aggregator.aggregate() else: a : Union[str, Any] = {} for key in scores[0]: a : str = [score[key] for score in scores] return result
297
'''simple docstring''' from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowerCamelCase__ ( _A = "laptop" ): a : Any = f"""https://www.amazon.in/laptop/s?k={product}""" a : Tuple = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36', 'Accept-Language': 'en-US, en;q=0.5', } a : Any = BeautifulSoup(requests.get(_A , headers=_A ).text ) # Initialize a Pandas dataframe with the column titles a : Any = DataFrame( columns=[ 'Product Title', 'Product Link', 'Current Price of the product', 'Product Rating', 'MRP of the product', 'Discount', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( 'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ): try: a : Optional[int] = item.ha.text a : str = 'https://www.amazon.in/' + item.ha.a['href'] a : List[str] = item.find('span' , attrs={'class': 'a-offscreen'} ).text try: a : Optional[Any] = item.find('span' , attrs={'class': 'a-icon-alt'} ).text except AttributeError: a : Union[str, Any] = 'Not available' try: a : str = ( '₹' + item.find( 'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1] ) except AttributeError: a : int = '' try: a : Union[str, Any] = float( ( ( float(product_mrp.strip('₹' ).replace(',' , '' ) ) - float(product_price.strip('₹' ).replace(',' , '' ) ) ) / float(product_mrp.strip('₹' ).replace(',' , '' ) ) ) * 100 ) except ValueError: a : Any = float('nan' ) except AttributeError: pass a : Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] a : Any = ' ' a : List[str] = ' ' data_frame.index += 1 return data_frame if __name__ == "__main__": lowerCAmelCase: str = 'headphones' get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
297
1
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging lowercase__ = logging.get_logger(__name__) def __a ( ) ->Optional[Any]: # Get the sagemaker specific mp parameters from smp_options variable. a__: Tuple = os.getenv('SM_HP_MP_PARAMETERS' , '{}' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. a__: Optional[int] = json.loads(_SCREAMING_SNAKE_CASE ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. a__: Optional[int] = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". a__: Any = json.loads(_SCREAMING_SNAKE_CASE ) if not mpi_options.get('sagemaker_mpi_enabled' , _SCREAMING_SNAKE_CASE ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('smdistributed' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class __snake_case ( __lowerCAmelCase ): a__ = field( default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , ) def lowerCamelCase_ ( self) -> str: '''simple docstring''' super().__post_init__() warnings.warn( '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ' '`TrainingArguments` instead.' , lowercase , ) @cached_property def lowerCamelCase_ ( self) -> "torch.device": '''simple docstring''' logger.info('PyTorch: setting up devices') if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( 'torch.distributed process group is initialized, but local_rank == -1. ' 'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch') if self.no_cuda: a__: Union[str, Any] = torch.device('cpu') a__: Union[str, Any] = 0 elif is_sagemaker_model_parallel_available(): a__: str = smp.local_rank() a__: Union[str, Any] = torch.device('cuda' , lowercase) a__: Dict = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta) a__: Tuple = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK')) a__: Tuple = torch.device('cuda' , self.local_rank) a__: int = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 a__: Optional[int] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. a__: List[str] = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta) a__: List[Any] = torch.device('cuda' , self.local_rank) a__: List[str] = 1 if device.type == "cuda": torch.cuda.set_device(lowercase) return device @property def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' return not is_sagemaker_model_parallel_available() @property def lowerCamelCase_ ( self) -> List[str]: '''simple docstring''' return False
354
"""simple docstring""" def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple: _enforce_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if n == 0: return 0 a__: List[Any] = float('-inf' ) for i in range(1 , n + 1 ): a__: Optional[Any] = max( _SCREAMING_SNAKE_CASE , prices[i - 1] + naive_cut_rod_recursive(n - i , _SCREAMING_SNAKE_CASE ) ) return max_revue def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str: _enforce_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: str = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: a__: Dict = float('-inf' ) for i in range(1 , n + 1 ): a__: Optional[Any] = max( _SCREAMING_SNAKE_CASE , prices[i - 1] + _top_down_cut_rod_recursive(n - i , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , ) a__: Optional[int] = max_revenue return max_rev[n] def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict: _enforce_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. a__: str = [float('-inf' ) for _ in range(n + 1 )] a__: Tuple = 0 for i in range(1 , n + 1 ): a__: List[str] = max_rev[i] for j in range(1 , i + 1 ): a__: Tuple = max(_SCREAMING_SNAKE_CASE , prices[j - 1] + max_rev[i - j] ) a__: Union[str, Any] = max_revenue_i return max_rev[n] def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any: if n < 0: a__: Optional[int] = F'n must be greater than or equal to 0. Got n = {n}' raise ValueError(_SCREAMING_SNAKE_CASE ) if n > len(_SCREAMING_SNAKE_CASE ): a__: List[str] = ( 'Each integral piece of rod must have a corresponding price. ' F'Got n = {n} but length of prices = {len(_SCREAMING_SNAKE_CASE )}' ) raise ValueError(_SCREAMING_SNAKE_CASE ) def __a ( ) ->str: a__: int = [6, 10, 12, 15, 20, 23] a__: Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. a__: Any = 36 a__: Optional[int] = top_down_cut_rod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: List[Any] = bottom_up_cut_rod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: int = naive_cut_rod_recursive(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
203
0
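The rod-cutting row above implements the recurrence max_rev[i] = max over j of (prices[j-1] + max_rev[i-j]); below is a compact standalone sketch of the bottom-up version, an illustrative addition (not a dataset row) tested with the classic CLRS price list:

def bottom_up_cut_rod(n: int, prices: list[int]) -> int:
    # max_rev[i] holds the best revenue for a rod of length i
    max_rev = [0] * (n + 1)
    for i in range(1, n + 1):
        max_rev[i] = max(prices[j - 1] + max_rev[i - j] for j in range(1, i + 1))
    return max_rev[n]

assert bottom_up_cut_rod(6, [6, 10, 12, 15, 20, 23]) == 36  # six pieces of length 1
assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10             # two pieces of length 2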
def is_sum_subset(arr, required_sum):
    '''simple docstring'''
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
101
import unittest

from knapsack import knapsack as k


class TestKnapsack(unittest.TestCase):
    def test_base_case(self) -> None:
        '''simple docstring'''
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self) -> None:
        '''simple docstring'''
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self) -> None:
        '''simple docstring'''
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
196
0
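The subset-sum row above fills an (n+1) x (sum+1) boolean table in O(n * required_sum); here is a self-contained usage sketch (an illustrative addition with names of my own choosing, not a dataset row):

def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    n = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        subset[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            subset[i][j] = subset[i - 1][j]  # option 1: skip arr[i-1]
            if arr[i - 1] <= j:
                # option 2: take arr[i-1] and cover the remainder
                subset[i][j] = subset[i][j] or subset[i - 1][j - arr[i - 1]]
    return subset[n][required_sum]

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True    # 4 + 5
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False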
'''simple docstring'''
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val):
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size):
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name, key_size):
    if os.path.exists(F"""{name}_pubkey.txt""") or os.path.exists(F"""{name}_privkey.txt"""):
        print('\nWARNING:')
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            'Use a different name or delete these files and re-run this program.'
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(F"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(F"""{name}_pubkey.txt""", 'w') as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")
    print(F"""Writing private key to file {name}_privkey.txt...""")
    with open(F"""{name}_privkey.txt""", 'w') as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""")


def main():
    print('Making key files...')
    make_key_files('elgamal', 20_48)
    print('Key files generation successful')


if __name__ == "__main__":
    main()
352
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A_ : '''simple docstring''' def __init__( self : Union[str, Any] , lowercase_ : str , lowercase_ : Union[str, Any]=2 , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=4 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[Any]=7 , lowercase_ : Any=True , lowercase_ : Tuple=True , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=True , lowercase_ : str=99 , lowercase_ : str=36 , lowercase_ : int=3 , lowercase_ : int=4 , lowercase_ : Any=37 , lowercase_ : str="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : int=512 , lowercase_ : int=16 , lowercase_ : Dict=2 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=6 , lowercase_ : Tuple=6 , lowercase_ : Any=3 , lowercase_ : Dict=4 , lowercase_ : Any=None , lowercase_ : Tuple=1_000 , ) -> Tuple: UpperCAmelCase : List[Any] = parent UpperCAmelCase : List[Any] = batch_size UpperCAmelCase : List[Any] = num_channels UpperCAmelCase : Optional[int] = image_size UpperCAmelCase : int = patch_size UpperCAmelCase : Tuple = text_seq_length UpperCAmelCase : int = is_training UpperCAmelCase : Any = use_input_mask UpperCAmelCase : Optional[int] = use_token_type_ids UpperCAmelCase : int = use_labels UpperCAmelCase : Dict = vocab_size UpperCAmelCase : List[str] = hidden_size UpperCAmelCase : Any = num_hidden_layers UpperCAmelCase : str = num_attention_heads UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : Any = attention_probs_dropout_prob UpperCAmelCase : Tuple = max_position_embeddings UpperCAmelCase : List[str] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : Optional[int] = coordinate_size UpperCAmelCase : Optional[int] = shape_size UpperCAmelCase : str = num_labels UpperCAmelCase : str = num_choices UpperCAmelCase : int = scope UpperCAmelCase : Tuple = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCAmelCase : Any = text_seq_length UpperCAmelCase : int = (image_size // patch_size) ** 2 + 1 UpperCAmelCase : Optional[int] = self.text_seq_length + self.image_seq_length def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox 
) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase : int = bbox[i, j, 3] UpperCAmelCase : List[Any] = bbox[i, j, 1] UpperCAmelCase : Union[str, Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase : Tuple = bbox[i, j, 2] UpperCAmelCase : List[str] = bbox[i, j, 0] UpperCAmelCase : List[str] = t UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : int = None if self.use_input_mask: UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCAmelCase : int = None if self.use_token_type_ids: UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCAmelCase : Dict = None UpperCAmelCase : Dict = None if self.use_labels: UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : int = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCAmelCase : str = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase_ ( self : str , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : str , lowercase_ : Optional[Any] ) -> Any: UpperCAmelCase : Dict = LayoutLMvaModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() # text + image UpperCAmelCase : Optional[Any] = model(lowercase_ , pixel_values=lowercase_ ) UpperCAmelCase : str = model( lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ ) UpperCAmelCase : Any = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , token_type_ids=lowercase_ ) UpperCAmelCase : str = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only UpperCAmelCase : List[Any] = model(lowercase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCAmelCase : Dict = model(pixel_values=lowercase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any , lowercase_ : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Union[str, Any] = LayoutLMvaForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase : int = model( lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , 
attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Any , lowercase_ : int , lowercase_ : str , lowercase_ : Any , lowercase_ : int , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int ) -> Any: UpperCAmelCase : Optional[int] = self.num_labels UpperCAmelCase : int = LayoutLMvaForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase : Optional[Any] = model( lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[int] ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = LayoutLMvaForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase : List[str] = model( lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : str ) -> List[Any]: UpperCAmelCase : List[str] = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : Optional[int] = config_and_inputs UpperCAmelCase : Optional[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class A_ ( _snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = False UpperCAmelCase_ : Dict = False UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : List[str] = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) UpperCAmelCase_ : List[str] = ( {"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel} if is_torch_available() else {} ) def UpperCAmelCase_ ( self : Any , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ) -> Union[str, Any]: # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def UpperCAmelCase_ ( self : str ) -> Any: UpperCAmelCase : Union[str, Any] = LayoutLMvaModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def UpperCAmelCase_ ( self : Tuple , lowercase_ : int , lowercase_ : Dict , lowercase_ : Any=False ) -> Optional[Any]: UpperCAmelCase : str = copy.deepcopy(lowercase_ ) if model_class in get_values(lowercase_ ): UpperCAmelCase : str = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(lowercase_ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowercase_ ): UpperCAmelCase : Dict = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) elif model_class in get_values(lowercase_ ): UpperCAmelCase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) UpperCAmelCase : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) elif model_class in [ *get_values(lowercase_ ), ]: UpperCAmelCase : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) elif model_class in [ *get_values(lowercase_ ), ]: UpperCAmelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase_ , ) return inputs_dict def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : Any ) -> Any: UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase : Optional[int] = type self.model_tester.create_and_check_model(*lowercase_ ) def UpperCAmelCase_ ( self : Optional[int] ) -> str: UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase_ ) def UpperCAmelCase_ ( self : Dict ) -> Any: UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) def UpperCAmelCase_ ( self : Optional[int] ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_ ) @slow def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : int = LayoutLMvaModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def UpperCamelCase( ): UpperCAmelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class A_ ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase_ ( self : List[Any] ) -> Dict: return LayoutLMvaImageProcessor(apply_ocr=lowercase_ ) if is_vision_available() else None @slow def UpperCAmelCase_ ( self : Tuple ) -> List[str]: UpperCAmelCase : int = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(lowercase_ ) UpperCAmelCase : Dict = self.default_image_processor UpperCAmelCase : str = prepare_img() UpperCAmelCase : Optional[Any] = 
image_processor(images=lowercase_ , return_tensors='pt' ).pixel_values.to(lowercase_ ) UpperCAmelCase : int = torch.tensor([[1, 2]] ) UpperCAmelCase : Tuple = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass UpperCAmelCase : Dict = model( input_ids=input_ids.to(lowercase_ ) , bbox=bbox.to(lowercase_ ) , pixel_values=pixel_values.to(lowercase_ ) , ) # verify the logits UpperCAmelCase : Optional[Any] = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , lowercase_ ) UpperCAmelCase : List[str] = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1E-4 ) )
280
0
"""simple docstring""" def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: '''simple docstring''' __snake_case : Dict = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __snake_case : Dict = n - k # Calculate C(n,k) for i in range(UpperCAmelCase_ ): result *= n - i result //= i + 1 return result def __UpperCAmelCase ( UpperCAmelCase_ : int ) -> int: '''simple docstring''' return binomial_coefficient(2 * node_count , UpperCAmelCase_ ) // (node_count + 1) def __UpperCAmelCase ( UpperCAmelCase_ : int ) -> int: '''simple docstring''' if n < 0: raise ValueError('factorial() not defined for negative values' ) __snake_case : str = 1 for i in range(1 , n + 1 ): result *= i return result def __UpperCAmelCase ( UpperCAmelCase_ : int ) -> int: '''simple docstring''' return catalan_number(UpperCAmelCase_ ) * factorial(UpperCAmelCase_ ) if __name__ == "__main__": _a : Union[str, Any]= int(input("Enter the number of nodes: ").strip() or 0) if node_count <= 0: raise ValueError("We need some nodes to work with.") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
172
"""simple docstring""" def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case : List[str] = [] __snake_case : Optional[Any] = set({'(', '[', '{'} ) __snake_case : Union[str, Any] = set({')', ']', '}'} ) __snake_case : Tuple = {'{': '}', '[': ']', '(': ')'} for i in range(len(UpperCAmelCase_ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(UpperCAmelCase_ ) == 0 or (len(UpperCAmelCase_ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(UpperCAmelCase_ ) == 0 def __UpperCAmelCase ( ) -> Any: '''simple docstring''' __snake_case : Optional[Any] = input('Enter sequence of brackets: ' ) if is_balanced(UpperCAmelCase_ ): print(UpperCAmelCase_ , 'is balanced' ) else: print(UpperCAmelCase_ , 'is not balanced' ) if __name__ == "__main__": main()
172
1
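A quick sanity check for the two snippets above (this assumes binomial coefficient helpers and is_balanced are imported from their respective modules into one scope):

# Catalan(5) = 42 and 5! = 120, so 5 nodes admit 42 BSTs and 5040 binary trees.
assert catalan_number(5) == 42
assert binary_tree_count(5) == 5040
assert is_balanced("([]{})")
assert not is_balanced("([)]")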
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = tempfile.mkdtemp() # fmt: off __a : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on __a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) __a : Optional[Any] = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } __a : Any = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__a , __a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __a : str = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.get_tokenizer() __a : int = self.get_image_processor() __a : Tuple = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) processor.save_pretrained(self.tmpdirname ) __a : List[Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __a : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : Any = self.get_image_processor(do_normalize=__a , padding_value=1.0 ) __a : int = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = 
self.get_image_processor() __a : Dict = self.get_tokenizer() __a : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __a : Union[str, Any] = self.prepare_image_inputs() __a : Optional[int] = image_processor(__a , return_tensors='np' ) __a : Tuple = processor(images=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.get_image_processor() __a : Tuple = self.get_tokenizer() __a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __a : List[str] = 'lower newer' __a : Any = processor(text=__a ) __a : Tuple = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.get_image_processor() __a : int = self.get_tokenizer() __a : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __a : Tuple = 'lower newer' __a : Optional[Any] = self.prepare_image_inputs() __a : Optional[Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(__a ): processor() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_image_processor() __a : List[Any] = self.get_tokenizer() __a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __a : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a : Union[str, Any] = processor.batch_decode(__a ) __a : Any = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.get_image_processor() __a : List[Any] = self.get_tokenizer() __a : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __a : int = 'lower newer' __a : Any = self.prepare_image_inputs() __a : Optional[Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
356
'''simple docstring'''
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
    names = names.replace('"', '').split(',')

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
294
0
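As a worked example of the scoring rule used in solution() above: "COLIN" is worth 3 + 15 + 12 + 9 + 14 = 53, and at position 938 of the alphabetically sorted list it contributes 938 * 53 = 49714.

name = "COLIN"
name_score = sum(ord(letter) - 64 for letter in name)  # maps A -> 1, B -> 2, ...
assert name_score == 53
assert 938 * name_score == 49714  # COLIN's contribution at sorted position 938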
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase (__A , __A , __A): """simple docstring""" _a = AutoConfig.from_pretrained(__A) _a = FlaxAutoModelForSeqaSeqLM.from_config(config=__A) _a = checkpoints.load_tax_checkpoint(__A) _a = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": _a = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": _a = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''') # Encoder for layer_index in range(config.num_layers): _a = F'''layers_{str(__A)}''' # Self-Attention _a = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] _a = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] _a = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] _a = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization _a = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: _a = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] _a = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: _a = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] _a = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization _a = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning _a = flax_model.params['''encoder''']['''block'''][str(__A)]['''layer'''] _a = tax_attention_key _a = tax_attention_out _a = tax_attention_query _a = tax_attention_value _a = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a = tax_global_layer_norm if split_mlp_wi: _a = tax_mlp_wi_a _a = tax_mlp_wi_a else: _a = tax_mlp_wi _a = tax_mlp_wo _a = tax_mlp_layer_norm _a = flax_model_encoder_layer_block # Only for layer 0: _a = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T _a = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T _a = tax_encoder_global_rel_embedding # Assigning _a = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] _a = tax_encoder_norm # Decoder for layer_index in range(config.num_layers): _a = F'''layers_{str(__A)}''' # Self-Attention _a = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] _a = 
tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] _a = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] _a = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization _a = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention _a = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] _a = tax_enc_dec_attention_module['''key''']['''kernel'''] _a = tax_enc_dec_attention_module['''out''']['''kernel'''] _a = tax_enc_dec_attention_module['''query''']['''kernel'''] _a = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization _a = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: _a = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] _a = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: _a = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] _a = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization _a = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning _a = flax_model.params['''decoder''']['''block'''][str(__A)]['''layer'''] _a = tax_attention_key _a = tax_attention_out _a = tax_attention_query _a = tax_attention_value _a = tax_pre_attention_layer_norm _a = tax_enc_dec_attention_key _a = tax_enc_dec_attention_out _a = tax_enc_dec_attention_query _a = tax_enc_dec_attention_value _a = tax_cross_layer_norm if split_mlp_wi: _a = tax_mlp_wi_a _a = tax_mlp_wi_a else: _a = tax_mlp_wi _a = tax_mlp_wo _a = txa_mlp_layer_norm _a = flax_model_decoder_layer_block # Decoder Normalization _a = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] _a = txa_decoder_norm # Only for layer 0: _a = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T _a = tax_decoder_rel_embedding # Token Embeddings _a = tax_model['''target''']['''token_embedder''']['''embedding'''] _a = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _a = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(__A) print('''T5X Model was sucessfully converted!''') if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) lowercase_ = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
211
'''simple docstring'''
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or castable to int.'
        )
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than len(bwt_string).'
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
211
1
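The Burrows-Wheeler helpers above are easiest to follow on a small input: the sorted rotations of "banana" end in the characters "nnbaaa", with the original string at index 3.

result = bwt_transform("banana")
assert result == {"bwt_string": "nnbaaa", "idx_original_string": 3}
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "banana"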
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
354
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
0
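binary_and pads both operands to a common width before comparing bits, so for example:

assert binary_and(25, 32) == "0b000000"  # 011001 & 100000, zero-filled to six bits
assert binary_and(37, 50) == "0b100000"  # 100101 & 110010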
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _snake_case ( snake_case_ ): def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Optional[int] ): with open(UpperCamelCase__ , encoding="utf-8" ) as input_file: __lowerCamelCase : int = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) __lowerCamelCase : List[str] = input_file.read() __lowerCamelCase : List[Any] = regexp.search(UpperCamelCase__ ) return match def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Dict ): with open(UpperCamelCase__ , encoding="utf-8" ) as input_file: __lowerCamelCase : Optional[int] = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) __lowerCamelCase : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` __lowerCamelCase : str = regexp.finditer(UpperCamelCase__ ) __lowerCamelCase : Optional[Any] = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def lowerCamelCase__ ( self : int ): __lowerCamelCase : int = Path("./datasets" ) __lowerCamelCase : Optional[int] = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(UpperCamelCase__ ) ): raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" ) def lowerCamelCase__ ( self : int ): __lowerCamelCase : Any = Path("./datasets" ) __lowerCamelCase : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(UpperCamelCase__ ) ): raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
135
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class _lowerCAmelCase ( snake_case_ ): def lowerCamelCase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' if tokenize_kwargs is None: snake_case : Optional[Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" ) snake_case : List[str] = truncation snake_case : Union[str, Any] = tokenize_kwargs snake_case : List[Any] = {} if return_tensors is not None: snake_case : Tuple = return_tensors return preprocess_params, {}, postprocess_params def lowerCamelCase ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict[str, GenericTensor]: '''simple docstring''' snake_case : List[Any] = self.framework snake_case : str = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple: '''simple docstring''' snake_case : int = self.model(**UpperCamelCase__ ) return model_outputs def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Union[str, Any]: '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
203
0
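In normal use the feature-extraction pipeline above is constructed through transformers.pipeline rather than instantiated directly; a minimal usage sketch, where the checkpoint name is only an example:

from transformers import pipeline

extractor = pipeline(task="feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers is great!")
# `features` is a nested list of floats shaped [batch, tokens, hidden_size]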
import unittest

from knapsack import knapsack as k


class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
325
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
325
1
"""simple docstring""" def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = int(lowerCamelCase ) # Initialize Result UpperCAmelCase__ = [] # Traverse through all denomination for denomination in reversed(lowerCamelCase ): # Find denominations while int(lowerCamelCase ) >= int(lowerCamelCase ): total_value -= int(lowerCamelCase ) answer.append(lowerCamelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Optional[int] = '''0''' if ( input('Do you want to enter your denominations ? (yY/n): ').strip().lower() == "y" ): lowerCAmelCase__ : List[Any] = int(input('Enter the number of denominations you want to add: ').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ : int = input('Enter the change you want to make in Indian Currency: ').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 500, 2_000] lowerCAmelCase__ : Tuple = input('Enter the change you want to make: ').strip() if int(value) == 0 or int(value) < 0: print('The total value cannot be zero or negative.') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ : Optional[int] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=' ')
98
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
280
0
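The greedy change-maker above always takes the largest denomination that still fits; for the canonical Indian-currency list this greedy answer is also optimal, which is not true for arbitrary denomination sets:

denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
assert find_minimum_change(denominations, "987") == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]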
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
352
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __a : def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[Any]=14 , UpperCAmelCase : str=7 , UpperCAmelCase : str=True , UpperCAmelCase : int=True , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Any=True , UpperCAmelCase : Any=99 , UpperCAmelCase : Any=32 , UpperCAmelCase : Any=4 , UpperCAmelCase : int=4 , UpperCAmelCase : str=4 , UpperCAmelCase : Tuple=37 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Optional[Any]=5_12 , UpperCAmelCase : List[str]=0.02 , ): lowerCAmelCase_ : List[Any] = parent lowerCAmelCase_ : Union[str, Any] = batch_size lowerCAmelCase_ : Dict = seq_length lowerCAmelCase_ : Optional[Any] = is_training lowerCAmelCase_ : Optional[int] = use_input_mask lowerCAmelCase_ : Optional[Any] = use_token_type_ids lowerCAmelCase_ : Optional[Any] = use_labels lowerCAmelCase_ : Any = vocab_size lowerCAmelCase_ : Tuple = hidden_size lowerCAmelCase_ : Any = rotary_dim lowerCAmelCase_ : str = num_hidden_layers lowerCAmelCase_ : int = num_attention_heads lowerCAmelCase_ : Any = intermediate_size lowerCAmelCase_ : Dict = hidden_act lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase_ : Optional[Any] = max_position_embeddings lowerCAmelCase_ : Union[str, Any] = initializer_range lowerCAmelCase_ : int = None lowerCAmelCase_ : Union[str, Any] = vocab_size - 1 lowerCAmelCase_ : str = vocab_size - 1 lowerCAmelCase_ : Optional[int] = vocab_size - 1 def A ( self : List[Any] ): lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ : Optional[int] = None if self.use_input_mask: lowerCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ : Optional[int] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def A ( self : str ): lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = config_and_inputs lowerCAmelCase_ : int = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Tuple ): lowerCAmelCase_ : str = 20 lowerCAmelCase_ : Dict = model_class_name(UpperCAmelCase ) lowerCAmelCase_ : Optional[int] = 
model.init_cache(input_ids.shape[0] , UpperCAmelCase ) lowerCAmelCase_ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) lowerCAmelCase_ : Tuple = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCAmelCase_ : Dict = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , ) lowerCAmelCase_ : Union[str, Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCAmelCase_ : List[str] = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase , ) lowerCAmelCase_ : Any = model(UpperCAmelCase ) lowerCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' ) def A ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Any ): lowerCAmelCase_ : int = 20 lowerCAmelCase_ : List[Any] = model_class_name(UpperCAmelCase ) lowerCAmelCase_ : Tuple = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) lowerCAmelCase_ : Optional[int] = model.init_cache(input_ids.shape[0] , UpperCAmelCase ) lowerCAmelCase_ : Dict = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCAmelCase_ : Tuple = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , ) lowerCAmelCase_ : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCAmelCase_ : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase , position_ids=UpperCAmelCase , ) lowerCAmelCase_ : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase ) lowerCAmelCase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' ) @require_flax class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ): __snake_case : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __snake_case : Any = (FlaxGPTJForCausalLM,) if is_flax_available() else () def A ( self : Any ): lowerCAmelCase_ : List[str] = FlaxGPTJModelTester(self ) def A ( self : Union[str, Any] ): for model_class_name in self.all_model_classes: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def A ( self : Tuple ): for model_class_name in self.all_model_classes: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) @tooslow def A ( self : int ): lowerCAmelCase_ : Optional[int] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) lowerCAmelCase_ : Tuple = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase , truncation=UpperCAmelCase ) lowerCAmelCase_ : Optional[Any] = 
FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[Any] = model.config.eos_token_id lowerCAmelCase_ : List[Any] = jax.jit(model.generate ) lowerCAmelCase_ : Any = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences lowerCAmelCase_ : str = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) lowerCAmelCase_ : Optional[int] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @is_pt_flax_cross_test def A ( self : Optional[Any] ): lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCAmelCase_ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) lowerCAmelCase_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCAmelCase_ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCAmelCase_ : Dict = getattr(UpperCAmelCase , UpperCAmelCase ) lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = pt_inputs["""input_ids"""].shape lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase ): lowerCAmelCase_ : Optional[Any] = 0 lowerCAmelCase_ : Any = 1 lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : List[Any] = 1 lowerCAmelCase_ : Tuple = pt_model_class(UpperCAmelCase ).eval() lowerCAmelCase_ : List[str] = model_class(UpperCAmelCase , dtype=jnp.floataa ) lowerCAmelCase_ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase ) lowerCAmelCase_ : List[str] = fx_state with torch.no_grad(): lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple() lowerCAmelCase_ : int = fx_model(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase ) lowerCAmelCase_ : Optional[int] = model_class.from_pretrained(UpperCAmelCase , from_pt=UpperCAmelCase ) lowerCAmelCase_ : Union[str, Any] = fx_model_loaded(**UpperCAmelCase ).to_tuple() self.assertEqual( len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase , UpperCAmelCase ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def A ( self : Optional[Any] ): lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCAmelCase_ : str = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) lowerCAmelCase_ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCAmelCase_ : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCAmelCase_ : Any = 
getattr(UpperCAmelCase , UpperCAmelCase ) lowerCAmelCase_ : str = pt_model_class(UpperCAmelCase ).eval() lowerCAmelCase_ : Any = model_class(UpperCAmelCase , dtype=jnp.floataa ) lowerCAmelCase_ : Union[str, Any] = load_flax_weights_in_pytorch_model(UpperCAmelCase , fx_model.params ) lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = pt_inputs["""input_ids"""].shape lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase ): lowerCAmelCase_ : Any = 0 lowerCAmelCase_ : Optional[int] = 1 lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : str = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple() lowerCAmelCase_ : Tuple = fx_model(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase ) lowerCAmelCase_ : Optional[Any] = pt_model_class.from_pretrained(UpperCAmelCase , from_flax=UpperCAmelCase ) with torch.no_grad(): lowerCAmelCase_ : Dict = pt_model_loaded(**UpperCAmelCase ).to_tuple() self.assertEqual( len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def A ( self : str ): for model_class_name in self.all_model_classes: lowerCAmelCase_ : Optional[Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) lowerCAmelCase_ : Optional[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase )
28
0
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' while b: _a , _a : str = b, a % b return a def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' return a if b == 0 else euclidean_gcd_recursive(UpperCamelCase__ , a % b ) def lowerCAmelCase__ ( ): '''simple docstring''' print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" ) print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" ) print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" ) print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" ) print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" ) print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" ) print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" ) print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" ) print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" ) print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" ) if __name__ == "__main__": main()
294
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ): '''simple docstring''' _a : Optional[Any] = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: _a : Tuple = 1 - (matter_density + radiation_density + dark_energy) _a : int = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) _a : List[str] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation _snake_case = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
294
1
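Both gcd variants above agree on any pair, and the Hubble-parameter demo reduces to the input constant when the redshift is zero and the density parameters sum to one:

assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6

# At z = 0 with Omega_r + Omega_m + Omega_Lambda = 1 the curvature term vanishes,
# E(z) = 1, and hubble_parameter returns H0 itself:
h = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=0.7 - 1e-4,
    redshift=0,
)
assert abs(h - 68.3) < 1e-6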
"""simple docstring""" import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[int] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase__ : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] ) UpperCAmelCase__ : int = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] ) @require_tpu def _a (self ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = F"""\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n """.split() UpperCAmelCase__ : Union[str, Any] = [sys.executable] + distributed_args execute_subprocess_async(lowerCamelCase_ , env=os.environ.copy() )
361
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) def a__ ( lowerCAmelCase ) -> Tuple: UpperCAmelCase__ : Optional[int] = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): UpperCAmelCase__ : Dict = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): UpperCAmelCase__ : int = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 UpperCAmelCase__ : Optional[int] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] UpperCAmelCase__ : Union[str, Any] = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(lowerCAmelCase )-1}""" ) if "norm" in key: UpperCAmelCase__ : Optional[Any] = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 UpperCAmelCase__ : int = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] UpperCAmelCase__ : Union[str, Any] = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(lowerCAmelCase )-1}""" ) if "layer_norm1" in key: UpperCAmelCase__ : Any = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: UpperCAmelCase__ : Union[str, Any] = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 UpperCAmelCase__ : int = key[key.find("""block""" ) + len("""block""" )] UpperCAmelCase__ : List[Any] = key.replace(F"""block{idx}""" , F"""block.{int(lowerCAmelCase )-1}""" ) if "attn.q" in key: UpperCAmelCase__ : List[Any] = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: UpperCAmelCase__ : Tuple = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: UpperCAmelCase__ : Union[str, Any] = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: UpperCAmelCase__ : int = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: UpperCAmelCase__ : List[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: UpperCAmelCase__ : Optional[Any] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: UpperCAmelCase__ : Optional[Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) UpperCAmelCase__ : Optional[Any] = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 UpperCAmelCase__ : List[Any] = key[key.find("""linear_c""" ) + len("""linear_c""" )] UpperCAmelCase__ : int = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(lowerCAmelCase )-1}""" ) if "bot_conv" in key: UpperCAmelCase__ : int = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: UpperCAmelCase__ : List[Any] = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: UpperCAmelCase__ : List[Any] = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: UpperCAmelCase__ : Optional[Any] = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: UpperCAmelCase__ : List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: UpperCAmelCase__ : int = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in 
key and "conv" in key: UpperCAmelCase__ : Union[str, Any] = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): UpperCAmelCase__ : Optional[int] = key.replace("""module.last_layer_depth""" , """head.head""" ) UpperCAmelCase__ : Optional[Any] = value return new_state_dict def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Dict: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) UpperCAmelCase__ : Dict = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) UpperCAmelCase__ : int = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict UpperCAmelCase__ : Optional[int] = kv_weight[ : config.hidden_sizes[i], : ] UpperCAmelCase__ : int = kv_bias[: config.hidden_sizes[i]] UpperCAmelCase__ : int = kv_weight[ config.hidden_sizes[i] :, : ] UpperCAmelCase__ : List[Any] = kv_bias[config.hidden_sizes[i] :] def a__ ( ) -> int: UpperCAmelCase__ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase__ : int = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ) return image @torch.no_grad() def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=None ) -> Union[str, Any]: UpperCAmelCase__ : Any = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) UpperCAmelCase__ : Any = GLPNImageProcessor() # prepare image UpperCAmelCase__ : List[str] = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict UpperCAmelCase__ : Tuple = torch.load(lowerCAmelCase , map_location=torch.device("""cpu""" ) ) # rename keys UpperCAmelCase__ : Optional[Any] = rename_keys(lowerCAmelCase ) # key and value matrices need special treatment read_in_k_v(lowerCAmelCase , lowerCAmelCase ) # create HuggingFace model and load state dict UpperCAmelCase__ : Union[str, Any] = GLPNForDepthEstimation(lowerCAmelCase ) model.load_state_dict(lowerCAmelCase ) model.eval() # forward pass UpperCAmelCase__ : Any = model(lowerCAmelCase ) UpperCAmelCase__ : Tuple = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: UpperCAmelCase__ : int = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(F"""Unknown model name: {model_name}""" ) UpperCAmelCase__ : Any = torch.Size([1, 4_80, 6_40] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowerCAmelCase , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowerCAmelCase , lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCAmelCase , lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , 
use_temp_dir=lowerCAmelCase , ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) _A = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
166
0
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt'
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt').pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs['pixel_values'].to(self.device),
            decoder_input_ids=inputs['decoder_input_ids'].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '')
        sequence = re.sub(r'<.*?>', '', sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
27
"""simple docstring""" def _A ( UpperCamelCase_ : Any) -> List[str]: '''simple docstring''' __lowercase ,__lowercase = [], [] while len(UpperCamelCase_) > 1: __lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_) start.append(UpperCamelCase_) end.append(UpperCamelCase_) collection.remove(UpperCamelCase_) collection.remove(UpperCamelCase_) end.reverse() return start + collection + end if __name__ == "__main__": _a = input('Enter numbers separated by a comma:\n').strip() _a = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
17
0
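The sort in the row above peels the minimum and the maximum off the remaining collection on every pass, so the list shrinks by two per iteration and an odd leftover element simply stays in the middle. A minimal sanity check of that behaviour (it assumes the cleaned-up merge_sort above is in scope; the test cases are illustrative):

def test_two_ended_sort():
    assert merge_sort([3, 1, 2]) == [1, 2, 3]  # odd length: the leftover middle element stays put
    assert merge_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert merge_sort([]) == []  # empty input never enters the loop
    assert merge_sort([7, 7, 7]) == [7, 7, 7]  # duplicates: list.remove() drops the first match

test_two_ended_sort()

Note that min(), max(), and list.remove() are each linear scans, so despite the name this variant is O(n^2), not an O(n log n) merge sort.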
"""simple docstring""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def __snake_case ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : Optional[Any] = AlbertConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(f'Building PyTorch model from configuration: {config}' ) _UpperCAmelCase : Optional[Any] = AlbertForPreTraining(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tf_weights_in_albert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _lowerCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
364
"""simple docstring""" import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json", "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json", "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json", } class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : int = 'owlvit_text_model' def __init__( self : int , A : int=4_9_4_0_8 , A : Optional[Any]=5_1_2 , A : Optional[Any]=2_0_4_8 , A : str=1_2 , A : int=8 , A : Tuple=1_6 , A : List[Any]="quick_gelu" , A : Tuple=1e-5 , A : Union[str, Any]=0.0 , A : List[Any]=0.02 , A : str=1.0 , A : str=0 , A : List[str]=4_9_4_0_6 , A : str=4_9_4_0_7 , **A : Optional[Any] , ): super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A ) _UpperCAmelCase : Union[str, Any] = vocab_size _UpperCAmelCase : str = hidden_size _UpperCAmelCase : List[Any] = intermediate_size _UpperCAmelCase : Any = num_hidden_layers _UpperCAmelCase : str = num_attention_heads _UpperCAmelCase : List[str] = max_position_embeddings _UpperCAmelCase : List[Any] = hidden_act _UpperCAmelCase : Tuple = layer_norm_eps _UpperCAmelCase : List[str] = attention_dropout _UpperCAmelCase : Optional[Any] = initializer_range _UpperCAmelCase : List[Any] = initializer_factor @classmethod def snake_case_ ( cls : Any , A : Union[str, os.PathLike] , **A : Dict ): cls._set_token_in_kwargs(A ) _UpperCAmelCase , _UpperCAmelCase : List[str] = cls.get_config_dict(A , **A ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": _UpperCAmelCase : int = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(A , **A ) class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : Tuple = 'owlvit_vision_model' def __init__( self : Union[str, Any] , A : Optional[int]=7_6_8 , A : int=3_0_7_2 , A : List[str]=1_2 , A : List[str]=1_2 , A : Optional[int]=3 , A : Optional[int]=7_6_8 , A : str=3_2 , A : Tuple="quick_gelu" , A : Dict=1e-5 , A : Optional[int]=0.0 , A : List[Any]=0.02 , A : str=1.0 , **A : Tuple , ): super().__init__(**A ) _UpperCAmelCase : List[str] = hidden_size _UpperCAmelCase : Tuple = intermediate_size _UpperCAmelCase : Optional[Any] = num_hidden_layers _UpperCAmelCase : Dict = num_attention_heads _UpperCAmelCase : Optional[Any] = num_channels _UpperCAmelCase : Union[str, Any] = image_size _UpperCAmelCase : Dict = patch_size _UpperCAmelCase : List[str] = hidden_act _UpperCAmelCase : Union[str, Any] = layer_norm_eps _UpperCAmelCase : Any = attention_dropout _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : Tuple = initializer_factor @classmethod def snake_case_ ( cls : Optional[int] , A : Union[str, os.PathLike] , **A : int ): cls._set_token_in_kwargs(A ) _UpperCAmelCase , _UpperCAmelCase : Dict = cls.get_config_dict(A , **A ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": _UpperCAmelCase : Tuple = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(A , **A ) class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : List[str] = 'owlvit' __SCREAMING_SNAKE_CASE : Optional[Any] = True def __init__( self : Optional[Any] , A : Dict=None , A : Tuple=None , A : Optional[Any]=5_1_2 , A : Optional[Any]=2.6_592 , A : int=True , **A : Tuple , ): super().__init__(**A ) if text_config is None: _UpperCAmelCase : List[Any] = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: _UpperCAmelCase : Tuple = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." ) _UpperCAmelCase : str = OwlViTTextConfig(**A ) _UpperCAmelCase : int = OwlViTVisionConfig(**A ) _UpperCAmelCase : Optional[Any] = projection_dim _UpperCAmelCase : str = logit_scale_init_value _UpperCAmelCase : Optional[Any] = return_dict _UpperCAmelCase : str = 1.0 @classmethod def snake_case_ ( cls : Dict , A : Union[str, os.PathLike] , **A : Any ): cls._set_token_in_kwargs(A ) _UpperCAmelCase , _UpperCAmelCase : str = cls.get_config_dict(A , **A ) if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(A , **A ) @classmethod def snake_case_ ( cls : Optional[int] , A : Dict , A : Dict , **A : Optional[Any] ): _UpperCAmelCase : Optional[Any] = {} _UpperCAmelCase : int = text_config _UpperCAmelCase : Dict = vision_config return cls.from_dict(A , **A ) def snake_case_ ( self : Optional[int] ): _UpperCAmelCase : str = copy.deepcopy(self.__dict__ ) _UpperCAmelCase : Optional[int] = self.text_config.to_dict() _UpperCAmelCase : Optional[int] = self.vision_config.to_dict() _UpperCAmelCase : List[Any] = self.__class__.model_type return output class UpperCAmelCase_ ( _UpperCamelCase ): @property def snake_case_ ( self : List[str] ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def snake_case_ ( self : Optional[int] ): return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def snake_case_ ( self : str ): return 1e-4 def snake_case_ ( self : str , A : "ProcessorMixin" , A : int = -1 , A : int = -1 , A : Optional["TensorType"] = None , ): _UpperCAmelCase : Optional[Any] = super().generate_dummy_inputs( processor.tokenizer , batch_size=A , seq_length=A , framework=A ) _UpperCAmelCase : Union[str, Any] = super().generate_dummy_inputs( processor.image_processor , batch_size=A , framework=A ) return {**text_input_dict, **image_input_dict} @property def snake_case_ ( self : List[Any] ): return 1_4
202
0
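The ALBERT converter above is normally driven from the command line, but the core function can also be called directly. A hypothetical driver, assuming the sample corresponds to transformers' convert_albert_original_tf_checkpoint_to_pytorch module (the three paths are placeholders, not files that ship anywhere):

from transformers.models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="albert_base/model.ckpt-best",     # placeholder TF checkpoint prefix
    albert_config_file="albert_base/albert_config.json",  # placeholder config file
    pytorch_dump_path="albert_base/pytorch_model.bin",    # placeholder output file
)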
import unittest

from knapsack import knapsack as k


class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
325
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE__ = """RegNetConfig""" # Base docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = """tabby, tabby cat""" SCREAMING_SNAKE_CASE__ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A__ ( nn.Module ): def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = nn.Convad( _UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) __lowercase = ACTaFN[activation] if activation is not None else nn.Identity() def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any: """simple docstring""" super().__init__() __lowercase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) __lowercase = config.num_channels def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) __lowercase = self.embedder(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: """simple docstring""" super().__init__() __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) __lowercase = nn.Sequential( nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , ) def a__ ( self : str , _UpperCAmelCase : Dict ) -> str: """simple docstring""" __lowercase = self.pooler(_UpperCAmelCase ) __lowercase = self.attention(_UpperCAmelCase ) __lowercase = hidden_state * attention return hidden_state class A__ ( nn.Module ): def __init__( self : Optional[int] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Tuple: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]: """simple docstring""" __lowercase = hidden_state __lowercase = self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : Tuple , _UpperCAmelCase : Any ) -> List[str]: """simple docstring""" __lowercase = hidden_state __lowercase = 
self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , ) -> Dict: """simple docstring""" super().__init__() __lowercase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer __lowercase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , ) def a__ ( self : Any , _UpperCAmelCase : str ) -> int: """simple docstring""" __lowercase = self.layers(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : RegNetConfig ) -> int: """simple docstring""" super().__init__() __lowercase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ): self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) ) def a__ ( self : int , _UpperCAmelCase : Tensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention: """simple docstring""" __lowercase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowercase = hidden_states + (hidden_state,) __lowercase = stage_module(_UpperCAmelCase ) if output_hidden_states: __lowercase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Optional[Any] = RegNetConfig lowerCAmelCase__ : Optional[int] = "regnet" lowerCAmelCase__ : Dict = "pixel_values" lowerCAmelCase__ : List[str] = True def a__ ( self : Any , _UpperCAmelCase : Any ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = value SCREAMING_SNAKE_CASE__ = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SCREAMING_SNAKE_CASE__ = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class A__ ( lowerCAmelCase__ ): def __init__( self : List[Any] , _UpperCAmelCase : Any ) -> str: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config __lowercase = RegNetEmbeddings(_UpperCAmelCase ) __lowercase = RegNetEncoder(_UpperCAmelCase ) __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a__ ( self : Tuple , _UpperCAmelCase : Tensor , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" __lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.embedder(_UpperCAmelCase ) __lowercase = self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = encoder_outputs[0] __lowercase = self.pooler(_UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class A__ ( lowerCAmelCase__ ): def __init__( self : str , _UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config.num_labels __lowercase = RegNetModel(_UpperCAmelCase ) # classification head __lowercase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[torch.LongTensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: """simple docstring""" __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = outputs.pooler_output if return_dict else outputs[1] __lowercase = self.classifier(_UpperCAmelCase ) __lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowercase = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowercase = 'single_label_classification' else: __lowercase = 'multi_label_classification' if self.config.problem_type == "regression": __lowercase = MSELoss() if self.num_labels == 1: __lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": __lowercase = CrossEntropyLoss() __lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowercase = BCEWithLogitsLoss() __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) if not return_dict: __lowercase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
325
1
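The tests in the row above call k.knapsack(cap, w, val, c). A minimal recursive 0/1 knapsack consistent with that signature is sketched below; it mirrors the classic formulation and is an assumption about the imported module, not a copy of it:

def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    # Base case: no items left, or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0
    # If the last item is too heavy, it cannot be included.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or skipping the last item.
    include = values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1)
    exclude = knapsack(capacity, weights, values, counter - 1)
    return max(include, exclude)

print(knapsack(50, [10, 20, 30], [60, 100, 120], 3))  # 220, matching the last test above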
'''simple docstring''' import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def lowerCAmelCase__ ( lowerCamelCase : Optional[Any] ): _A : Optional[Any] = model.config _A : Dict = DonutSwinConfig( image_size=original_config.input_size ,patch_size=4 ,depths=original_config.encoder_layer ,num_heads=[4, 8, 16, 32] ,window_size=original_config.window_size ,embed_dim=128 ,) _A : Any = MBartConfig( is_decoder=lowerCamelCase ,is_encoder_decoder=lowerCamelCase ,add_cross_attention=lowerCamelCase ,decoder_layers=original_config.decoder_layer ,max_position_embeddings=original_config.max_position_embeddings ,vocab_size=len( model.decoder.tokenizer ) ,scale_embedding=lowerCamelCase ,add_final_layer_norm=lowerCamelCase ,) return encoder_config, decoder_config def lowerCAmelCase__ ( lowerCamelCase : List[str] ): if "encoder.model" in name: _A : Dict = name.replace('encoder.model' ,'encoder' ) if "decoder.model" in name: _A : Dict = name.replace('decoder.model' ,'decoder' ) if "patch_embed.proj" in name: _A : int = name.replace('patch_embed.proj' ,'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _A : Tuple = name.replace('patch_embed.norm' ,'embeddings.norm' ) if name.startswith('encoder' ): if "layers" in name: _A : Union[str, Any] = 'encoder.' + name if "attn.proj" in name: _A : Optional[int] = name.replace('attn.proj' ,'attention.output.dense' ) if "attn" in name and "mask" not in name: _A : Optional[Any] = name.replace('attn' ,'attention.self' ) if "norm1" in name: _A : Tuple = name.replace('norm1' ,'layernorm_before' ) if "norm2" in name: _A : Any = name.replace('norm2' ,'layernorm_after' ) if "mlp.fc1" in name: _A : Optional[int] = name.replace('mlp.fc1' ,'intermediate.dense' ) if "mlp.fc2" in name: _A : Union[str, Any] = name.replace('mlp.fc2' ,'output.dense' ) if name == "encoder.norm.weight": _A : Optional[int] = 'encoder.layernorm.weight' if name == "encoder.norm.bias": _A : Dict = 'encoder.layernorm.bias' return name def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : Any ): for key in orig_state_dict.copy().keys(): _A : Tuple = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: _A : str = key.split('.' 
) _A : Any = int(key_split[3] ) _A : Dict = int(key_split[5] ) _A : Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _A : List[str] = val[:dim, :] _A : List[Any] = val[dim : dim * 2, :] _A : Optional[Any] = val[-dim:, :] else: _A : str = val[:dim] _A : Optional[int] = val[dim : dim * 2] _A : Optional[int] = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: _A : Dict = val return orig_state_dict def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Any=False ): # load original model _A : Optional[Any] = DonutModel.from_pretrained(lowerCamelCase ).eval() # load HuggingFace model _A : Tuple = get_configs(lowerCamelCase ) _A : List[Any] = DonutSwinModel(lowerCamelCase ) _A : Any = MBartForCausalLM(lowerCamelCase ) _A : Tuple = VisionEncoderDecoderModel(encoder=lowerCamelCase ,decoder=lowerCamelCase ) model.eval() _A : str = original_model.state_dict() _A : Any = convert_state_dict(lowerCamelCase ,lowerCamelCase ) model.load_state_dict(lowerCamelCase ) # verify results on scanned document _A : Tuple = load_dataset('hf-internal-testing/example-documents' ) _A : str = dataset['test'][0]['image'].convert('RGB' ) _A : Tuple = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase ,from_slow=lowerCamelCase ) _A : Union[str, Any] = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis ,size=original_model.config.input_size[::-1] ) _A : Union[str, Any] = DonutProcessor(lowerCamelCase ,lowerCamelCase ) _A : Union[str, Any] = processor(lowerCamelCase ,return_tensors='pt' ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": _A : Union[str, Any] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' _A : List[str] = 'When is the coffee break?' _A : str = task_prompt.replace('{user_input}' ,lowerCamelCase ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": _A : Dict = '<s_rvlcdip>' elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: _A : List[str] = '<s_cord>' elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": _A : Tuple = 's_cord-v2>' elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": _A : str = '<s_zhtrainticket>' elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt _A : Tuple = 'hello world' else: raise ValueError('Model name not supported' ) _A : str = original_model.decoder.tokenizer(lowerCamelCase ,add_special_tokens=lowerCamelCase ,return_tensors='pt' )[ 'input_ids' ] _A : List[Any] = original_model.encoder.model.patch_embed(lowerCamelCase ) _A : Dict = model.encoder.embeddings(lowerCamelCase ) assert torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-3 ) # verify encoder hidden states _A : Optional[int] = original_model.encoder(lowerCamelCase ) _A : Tuple = model.encoder(lowerCamelCase ).last_hidden_state assert torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-2 ) # verify decoder hidden states _A : int = original_model(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ).logits _A : List[Any] = model(lowerCamelCase ,decoder_input_ids=lowerCamelCase ).logits assert torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-3 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: print(F'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: model.push_to_hub('nielsr/' + model_name.split('/' )[-1] ,commit_message='Update model' ) processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] ,commit_message='Update model' ) if __name__ == "__main__": A : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''naver-clova-ix/donut-base-finetuned-docvqa''', required=False, type=str, help='''Name of the original model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, required=False, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub.''', ) A : Dict = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
353
'''simple docstring''' import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() A : int = 2 class __lowerCamelCase : """simple docstring""" def __init__( self : List[str] , *, # begin keyword-only arguments SCREAMING_SNAKE_CASE : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE : int="<pad>" , SCREAMING_SNAKE_CASE : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE : Tuple="<unk>" , SCREAMING_SNAKE_CASE : List[Any]=None , ): _A , _A , _A , _A : Any = bos, unk, pad, eos _A : Optional[Any] = [] _A : Optional[Any] = [] _A : Optional[int] = {} _A : Dict = self.add_symbol(SCREAMING_SNAKE_CASE) _A : List[str] = self.add_symbol(SCREAMING_SNAKE_CASE) _A : str = self.add_symbol(SCREAMING_SNAKE_CASE) _A : Any = self.add_symbol(SCREAMING_SNAKE_CASE) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(SCREAMING_SNAKE_CASE) _A : List[str] = len(self.symbols) def __eq__( self : int , SCREAMING_SNAKE_CASE : Optional[Any]): return self.indices == other.indices def __getitem__( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple): if idx < len(self.symbols): return self.symbols[idx] return self.unk_word def __len__( self : Union[str, Any]): return len(self.symbols) def __contains__( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str]): return sym in self.indices @classmethod def A ( cls : Dict , SCREAMING_SNAKE_CASE : Optional[Any]): _A : Any = cls() d.add_from_file(SCREAMING_SNAKE_CASE) return d def A ( self : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=1 , SCREAMING_SNAKE_CASE : int=False): if word in self.indices and not overwrite: _A : str = self.indices[word] _A : List[str] = self.count[idx] + n return idx else: _A : Optional[Any] = len(self.symbols) _A : Union[str, Any] = idx self.symbols.append(SCREAMING_SNAKE_CASE) self.count.append(SCREAMING_SNAKE_CASE) return idx def A ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int]): return 0 def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any]): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE): try: with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8') as fd: self.add_from_file(SCREAMING_SNAKE_CASE) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE)) return _A : Union[str, Any] = f.readlines() _A : Any = self._load_meta(SCREAMING_SNAKE_CASE) for line in lines[indices_start_line:]: try: _A , _A : List[str] = line.rstrip().rsplit(' ' , 1) if field == "#fairseq:overwrite": _A : int = True _A , _A : List[str] = line.rsplit(' ' , 1) else: _A : Union[str, Any] = False _A : List[str] = int(SCREAMING_SNAKE_CASE) _A : Optional[Any] = line if word in self and not overwrite: raise RuntimeError( 'Duplicate word found when loading Dictionary: \'{}\'. ' 'Duplicate words can overwrite earlier ones by adding the ' '#fairseq:overwrite flag at the end of the corresponding row ' 'in the dictionary file. 
If using the Camembert model, please ' 'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE)) self.add_symbol(SCREAMING_SNAKE_CASE , n=SCREAMING_SNAKE_CASE , overwrite=SCREAMING_SNAKE_CASE) except ValueError: raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'') def lowerCAmelCase__ ( lowerCamelCase : Optional[int] ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} _A : Union[str, Any] = dict((re.sub(R'@@$' ,'' ,lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' ,'</w>' ,lowerCamelCase ), v) for k, v in d.items() ) _A : Optional[Any] = '<s> <pad> </s> <unk>'.split() # restore the special tokens for k in keep_keys: del da[F'{k}</w>'] _A : str = d[k] # restore return da def lowerCAmelCase__ ( lowerCamelCase : List[str] ,lowerCamelCase : List[str] ): # prep if not os.path.exists(lowerCamelCase ): raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' ) os.makedirs(lowerCamelCase ,exist_ok=lowerCamelCase ) print(F'Writing results to {pytorch_dump_folder_path}' ) # handle various types of models _A : Dict = os.path.join(lowerCamelCase ,'checkpoint.pt' ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {checkpoint_file} does not exist!' ) _A : int = torch.load(lowerCamelCase ,map_location='cpu' ) _A : Dict = chkpt['cfg']['model'] # dicts _A : Any = os.path.join(lowerCamelCase ,'dict.txt' ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {dict_file} does not exist!' ) _A : Any = Dictionary.load(lowerCamelCase ) _A : Optional[int] = rewrite_dict_keys(src_dict.indices ) _A : List[Any] = len(lowerCamelCase ) _A : str = os.path.join(lowerCamelCase ,VOCAB_FILES_NAMES['vocab_file'] ) print(F'Generating {src_vocab_file} of {src_vocab_size} records' ) with open(lowerCamelCase ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(lowerCamelCase ,ensure_ascii=lowerCamelCase ,indent=lowerCamelCase ) ) # merges_file (bpecodes) _A : Optional[int] = os.path.join(lowerCamelCase ,'bpecodes' ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {bpecodes_file} does not exist!' 
) _A : Dict = os.path.join(lowerCamelCase ,VOCAB_FILES_NAMES['merges_file'] ) shutil.copyfile(lowerCamelCase ,lowerCamelCase ) # model config _A : str = os.path.join(lowerCamelCase ,'config.json' ) _A : int = { 'activation_dropout': args['activation_dropout'], 'architectures': ['BioGptForCausalLM'], 'attention_probs_dropout_prob': args['attention_dropout'], 'bos_token_id': 0, 'eos_token_id': 2, 'hidden_act': args['activation_fn'], 'hidden_dropout_prob': args['dropout'], 'hidden_size': args['decoder_embed_dim'], 'initializer_range': 0.02, 'intermediate_size': args['decoder_ffn_embed_dim'], 'layer_norm_eps': 1E-12, 'layerdrop': args['decoder_layerdrop'], 'max_position_embeddings': args['max_target_positions'], 'model_type': 'biogpt', 'num_attention_heads': args['decoder_attention_heads'], 'num_hidden_layers': args['decoder_layers'], 'pad_token_id': 1, 'scale_embedding': not args['no_scale_embedding'], 'tie_word_embeddings': args['share_decoder_input_output_embed'], 'vocab_size': src_vocab_size, } # good hparam defaults to start with print(F'Generating {biogpt_model_config_file}' ) with open(lowerCamelCase ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(lowerCamelCase ,ensure_ascii=lowerCamelCase ,indent=lowerCamelCase ) ) # tokenizer config _A : Union[str, Any] = os.path.join(lowerCamelCase ,lowerCamelCase ) _A : Any = { 'bos_token': '<s>', 'eos_token': '</s>', 'model_max_length': 1024, 'pad_token': '<pad>', 'special_tokens_map_file': None, 'tokenizer_class': 'BioGptTokenizer', 'unk_token': '<unk>', } print(F'Generating {biogpt_tokenizer_config_file}' ) with open(lowerCamelCase ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(lowerCamelCase ,ensure_ascii=lowerCamelCase ,indent=lowerCamelCase ) ) # model _A : List[Any] = chkpt['model'] # remove unneeded keys _A : int = [ 'decoder.version', ] for k in ignore_keys: model_state_dict.pop(lowerCamelCase ,lowerCamelCase ) _A : Any = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('output_projection.weight' ): _A : str = model_state_dict.pop(lowerCamelCase ) else: _A : Dict = model_state_dict.pop(lowerCamelCase ) _A : Any = BioGptConfig.from_pretrained(lowerCamelCase ) _A : Union[str, Any] = BioGptForCausalLM(lowerCamelCase ) # check that it loads ok model_new.load_state_dict(lowerCamelCase ) # save _A : Union[str, Any] = os.path.join(lowerCamelCase ,lowerCamelCase ) print(F'Generating {pytorch_weights_dump_path}' ) torch.save(lowerCamelCase ,lowerCamelCase ) print('Conversion is done!' ) if __name__ == "__main__": A : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--biogpt_checkpoint_path''', default=None, type=str, required=True, help=( '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,''' ''' bpecodes, etc.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A : int = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
227
0
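Both conversion scripts in the row above follow the same shape: walk the original checkpoint's state dict, rewrite every key into the Hugging Face naming scheme, then load the result into the new model. A stripped-down sketch of that loop (the substitution table is an illustrative subset, not the full mapping used by the Donut converter):

import re

# Illustrative subset of the key rewrites performed by the Donut converter above.
RENAMES = [
    (r"^encoder\.model", "encoder"),
    (r"^decoder\.model", "decoder"),
    (r"patch_embed\.proj", "embeddings.patch_embeddings.projection"),
]

def rename_state_dict(state_dict: dict) -> dict:
    new_state_dict = {}
    for key, value in state_dict.items():
        for pattern, replacement in RENAMES:
            key = re.sub(pattern, replacement, key)  # apply each rewrite in order
        new_state_dict[key] = value
    return new_state_dict

print(rename_state_dict({"encoder.model.patch_embed.proj.weight": 0}))
# {'encoder.embeddings.patch_embeddings.projection.weight': 0}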
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
90
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion point of `val` in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift everything from the insertion point rightwards by one slot.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
28
0
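The binary search in the insertion sort above only locates the insertion point; the shifting loop is what keeps the overall cost quadratic. The standard library expresses the same search-then-insert step with bisect.insort, which makes a handy behavioural cross-check (this snippet is a check, not part of the sample):

import bisect
import random

def binary_insertion_sort_stdlib(collection: list) -> list:
    result: list = []
    for item in collection:
        bisect.insort(result, item)  # same binary search + shift as the loop above
    return result

data = [random.randrange(100) for _ in range(20)]
assert binary_insertion_sort_stdlib(data) == sorted(data)

Like the hand-written version, bisect.insort places equal elements after existing ones (a bisect_right search), so the two agree on duplicates as well.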
"""simple docstring""" import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline __lowercase = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""") def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_=False , )-> Optional[int]: '''simple docstring''' output_path.parent.mkdir(parents=A_ , exist_ok=A_ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( A_ , A_ , f=output_path.as_posix() , input_names=A_ , output_names=A_ , dynamic_axes=A_ , do_constant_folding=A_ , use_external_data_format=A_ , enable_onnx_checker=A_ , opset_version=A_ , ) else: export( A_ , A_ , f=output_path.as_posix() , input_names=A_ , output_names=A_ , dynamic_axes=A_ , do_constant_folding=A_ , opset_version=A_ , ) @torch.no_grad() def lowercase ( A_ , A_ , A_ , A_ = False )-> List[str]: '''simple docstring''' a : int = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): a : Dict = "cuda" elif fpaa and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA" ) else: a : Optional[int] = "cpu" a : Tuple = StableDiffusionPipeline.from_pretrained(A_ , torch_dtype=A_ ).to(A_ ) a : str = Path(A_ ) # TEXT ENCODER a : List[Any] = pipeline.text_encoder.config.max_position_embeddings a : List[str] = pipeline.text_encoder.config.hidden_size a : Union[str, Any] = pipeline.tokenizer( "A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=A_ , return_tensors="pt" , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=A_ , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={ "input_ids": {0: "batch", 1: "sequence"}, } , opset=A_ , ) del pipeline.text_encoder # UNET a : Tuple = pipeline.unet.config.in_channels a : List[str] = pipeline.unet.config.sample_size a : List[str] = output_path / "unet" / "model.onnx" onnx_export( pipeline.unet , model_args=( torch.randn(2 , A_ , A_ , A_ ).to(device=A_ , dtype=A_ ), torch.randn(2 ).to(device=A_ , dtype=A_ ), torch.randn(2 , A_ , A_ ).to(device=A_ , dtype=A_ ), False, ) , output_path=A_ , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "timestep": {0: "batch"}, "encoder_hidden_states": {0: "batch", 1: "sequence"}, } , opset=A_ , use_external_data_format=A_ , ) a : str = str(unet_path.absolute().as_posix() ) a : Union[str, Any] = os.path.dirname(A_ ) a : Tuple = onnx.load(A_ ) # clean up existing tensor files shutil.rmtree(A_ ) os.mkdir(A_ ) # collate external tensor files into one onnx.save_model( A_ , A_ , save_as_external_data=A_ , all_tensors_to_one_file=A_ , location="weights.pb" , convert_attribute=A_ , ) del pipeline.unet # VAE ENCODER a : Dict = pipeline.vae a : Union[str, Any] = vae_encoder.config.in_channels a : int = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder a : List[Any] = lambda A_ , A_ : vae_encoder.encode(A_ , A_ )[0].sample() onnx_export( A_ , 
model_args=( torch.randn(1 , A_ , A_ , A_ ).to(device=A_ , dtype=A_ ), False, ) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=A_ , ) # VAE DECODER a : Tuple = pipeline.vae a : str = vae_decoder.config.latent_channels a : Dict = vae_decoder.config.out_channels # forward only through the decoder part a : List[str] = vae_encoder.decode onnx_export( A_ , model_args=( torch.randn(1 , A_ , A_ , A_ ).to(device=A_ , dtype=A_ ), False, ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=A_ , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: a : List[Any] = pipeline.safety_checker a : int = safety_checker.config.vision_config.num_channels a : List[str] = safety_checker.config.vision_config.image_size a : List[str] = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , A_ , A_ , A_ , ).to(device=A_ , dtype=A_ ), torch.randn(1 , A_ , A_ , A_ ).to(device=A_ , dtype=A_ ), ) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={ "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "images": {0: "batch", 1: "height", 2: "width", 3: "channels"}, } , opset=A_ , ) del pipeline.safety_checker a : Any = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" ) a : Optional[int] = pipeline.feature_extractor else: a : List[Any] = None a : str = None a : Dict = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=A_ , feature_extractor=A_ , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(A_ ) print("ONNX pipeline saved to" , A_ ) del pipeline del onnx_pipeline a : int = OnnxStableDiffusionPipeline.from_pretrained(A_ , provider="CPUExecutionProvider" ) print("ONNX pipeline is loadable" ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument( """--model_path""", type=str, required=True, help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""", ) parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--opset""", default=14, type=int, help="""The version of the ONNX operator set to use.""", ) parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""") __lowercase = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
226
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """char""" UpperCAmelCase : Optional[Any] = """bpe""" UpperCAmelCase : Optional[Any] = """wp""" __lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _A ( _a ): """simple docstring""" UpperCAmelCase : Optional[Any] = ["""image_processor""", """char_tokenizer"""] UpperCAmelCase : Optional[Any] = """ViTImageProcessor""" UpperCAmelCase : List[Any] = """MgpstrTokenizer""" def __init__( self : List[Any] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : str): a : Tuple = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __UpperCAmelCase , ) a : List[str] = kwargs.pop("feature_extractor") a : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") a : Union[str, Any] = tokenizer a : int = AutoTokenizer.from_pretrained("gpt2") a : str = AutoTokenizer.from_pretrained("bert-base-uncased") super().__init__(__UpperCAmelCase , __UpperCAmelCase) def __call__( self : str , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : int): if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") if images is not None: a : List[str] = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase) if text is not None: a : Optional[Any] = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase) if text is None: return inputs elif images is None: return encodings else: a : Any = encodings["input_ids"] return inputs def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str]): a , a , a : Tuple = sequences a : Optional[int] = char_preds.size(0) a , a : Dict = self._decode_helper(__UpperCAmelCase , "char") a , a : Dict = self._decode_helper(__UpperCAmelCase , "bpe") a , a : Union[str, Any] = self._decode_helper(__UpperCAmelCase , "wp") a : Any = [] a : Union[str, Any] = [] for i in range(__UpperCAmelCase): a : Any = [char_scores[i], bpe_scores[i], wp_scores[i]] a : Optional[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] a : List[str] = scores.index(max(__UpperCAmelCase)) final_strs.append(strs[max_score_index]) final_scores.append(scores[max_score_index]) a : Dict = {} a : List[str] = final_strs a : str = final_scores a : int = char_strs a : int = bpe_strs a : Tuple = wp_strs return out def __snake_case ( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str]): if format == DecodeType.CHARACTER: a : int = self.char_decode a : int = 1 a : Dict = "[s]" elif format == DecodeType.BPE: a : List[str] = self.bpe_decode a : List[str] = 2 a : int = "#" elif format == DecodeType.WORDPIECE: a : Union[str, Any] = self.wp_decode a : List[str] = 102 a : int = "[SEP]" else: raise ValueError(f'''Format {format} is not supported.''') a , a : str = [], [] a : Optional[int] = pred_logits.size(0) a : List[str] = 
pred_logits.size(1) a , a : Tuple = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase) a : List[str] = preds_index.view(-1 , __UpperCAmelCase)[:, 1:] a : Any = decoder(__UpperCAmelCase) a , a : Union[str, Any] = torch.nn.functional.softmax(__UpperCAmelCase , dim=2).max(dim=2) a : Union[str, Any] = preds_max_prob[:, 1:] for index in range(__UpperCAmelCase): a : str = preds_str[index].find(__UpperCAmelCase) a : Optional[Any] = preds_str[index][:pred_eos] a : Optional[int] = preds_index[index].cpu().tolist() a : Optional[int] = pred_index.index(__UpperCAmelCase) if eos_token in pred_index else -1 a : List[str] = preds_max_prob[index][: pred_eos_index + 1] a : int = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__UpperCAmelCase) conf_scores.append(__UpperCAmelCase) return dec_strs, conf_scores def __snake_case ( self : Optional[int] , __UpperCAmelCase : Any): a : Dict = [seq.replace(" " , "") for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase)] return decode_strs def __snake_case ( self : Optional[int] , __UpperCAmelCase : List[str]): return self.bpe_tokenizer.batch_decode(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Any = [seq.replace(" " , "") for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase)] return decode_strs
226
1
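The Stable Diffusion exporter in the row above calls its onnx_export helper once per sub-model (text encoder, UNet, the two VAE halves, safety checker), and each call bottoms out in torch.onnx.export. The essential pattern reduces to the snippet below, with a toy module standing in for the pipeline components; the file name and tensor names are illustrative:

import torch

model = torch.nn.Linear(4, 2).eval()  # stand-in for a pipeline sub-model
dummy_input = torch.randn(1, 4)

torch.onnx.export(
    model,
    (dummy_input,),                          # model_args in the helper above
    "toy_model.onnx",                        # output_path
    input_names=["sample"],                  # ordered_input_names
    output_names=["out_sample"],
    dynamic_axes={"sample": {0: "batch"}},   # keep the batch dimension dynamic
    do_constant_folding=True,
    opset_version=14,
)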
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
73
'''simple docstring''' import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _A ( _lowerCAmelCase=32 , _lowerCAmelCase=10 , _lowerCAmelCase=100 , _lowerCAmelCase=1_026 , _lowerCAmelCase=True , _lowerCAmelCase="data/tokenized_stories_train_wikitext103.jbl" , _lowerCAmelCase="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set __lowercase , __lowercase =generate_datasets( _lowerCAmelCase , _lowerCAmelCase , number=_lowerCAmelCase , min_len=1_026 , trim=_lowerCAmelCase ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? __lowercase =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' ) # load pretrained model __lowercase =load_gpta('gpt2' ).to(_lowerCAmelCase ) print('computing perplexity on objective set' ) __lowercase =compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).item() print('perplexity on objective set:' , _lowerCAmelCase ) # collect igf pairs and save to file demo.jbl collect_objective_set(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _A ( _lowerCAmelCase , _lowerCAmelCase=15 , _lowerCAmelCase=128 , _lowerCAmelCase=100 , _lowerCAmelCase="igf_model.pt" , ): """simple docstring""" set_seed(42 ) # Load pre-trained model __lowercase =GPTaLMHeadModel.from_pretrained('gpt2' ) # Initialize secondary learner to use embedding weights of model __lowercase =SecondaryLearner(_lowerCAmelCase ) # Train secondary learner __lowercase =train_secondary_learner( _lowerCAmelCase , _lowerCAmelCase , max_epochs=_lowerCAmelCase , batch_size=_lowerCAmelCase , eval_freq=100 , igf_model_path=_lowerCAmelCase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=32 , _lowerCAmelCase=1_000 , _lowerCAmelCase=16 , _lowerCAmelCase=1.0 , _lowerCAmelCase=recopy_gpta , _lowerCAmelCase=None , _lowerCAmelCase=10 , _lowerCAmelCase="gpt2_finetuned.pt" , ): """simple docstring""" __lowercase =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' ) __lowercase =RandomSampler(_lowerCAmelCase ) __lowercase =DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase ) __lowercase =max_steps // (len(_lowerCAmelCase )) + 1 __lowercase =0 __lowercase =torch.zeros((1, context_len) , dtype=torch.long , device=_lowerCAmelCase ) __lowercase , __lowercase , __lowercase =recopy_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) model.train() if secondary_learner is not None: secondary_learner.to(_lowerCAmelCase ) secondary_learner.eval() __lowercase =[] __lowercase =0 __lowercase =[] __lowercase =[] # Compute the performance of the transformer model at the beginning __lowercase =compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) test_perps.append(_lowerCAmelCase ) print('Test perplexity, step' , _lowerCAmelCase , ':' , _lowerCAmelCase ) for epoch in range(int(_lowerCAmelCase ) ): for 
step, example in enumerate(_lowerCAmelCase ): torch.cuda.empty_cache() __lowercase =random.randint(0 , example.size(2 ) - context_len - 1 ) __lowercase =example[0, 0, start : start + context_len] lm_optimizer.zero_grad() __lowercase =model(_lowerCAmelCase , labels=_lowerCAmelCase ) __lowercase =True if secondary_learner is not None: __lowercase =secondary_learner.forward( torch.tensor(_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase ).unsqueeze(0 ) )[0].item() observed_qs.append(float(_lowerCAmelCase ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: __lowercase =-1 if predicted_q < threshold: __lowercase =False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) __lowercase =outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() __lowercase =0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: __lowercase =compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) test_perps.append(_lowerCAmelCase ) print('Test perplexity, step' , _lowerCAmelCase , ':' , _lowerCAmelCase ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , _lowerCAmelCase ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _A ( ): """simple docstring""" __lowercase =argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' ) # Required parameters parser.add_argument( '--data_dir' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='The input data dir. Should contain data files for WikiText.' , ) parser.add_argument( '--model_name_or_path' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--data_file' , type=_lowerCAmelCase , default=_lowerCAmelCase , help=( 'A jbl file containing tokenized data which can be split as objective dataset, ' 'train_dataset and test_dataset.' ) , ) parser.add_argument( '--igf_data_file' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , ) parser.add_argument( '--output_dir' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='The output directory where the final fine-tuned model is stored.' , ) parser.add_argument( '--tokenizer_name' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument('--seed' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='A seed for reproducible training.' 
) parser.add_argument( '--context_len' , default=32 , type=_lowerCAmelCase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--size_objective_set' , default=100 , type=_lowerCAmelCase , help='number of articles that are long enough to be used as our objective set' , ) parser.add_argument( '--eval_freq' , default=100 , type=_lowerCAmelCase , help='secondary model evaluation is triggered at eval_freq' ) parser.add_argument('--max_steps' , default=1_000 , type=_lowerCAmelCase , help='To calculate training epochs' ) parser.add_argument( '--secondary_learner_batch_size' , default=128 , type=_lowerCAmelCase , help='batch size of training data for secondary learner' , ) parser.add_argument( '--batch_size' , default=16 , type=_lowerCAmelCase , help='batch size of training data of language model(gpt2) ' ) parser.add_argument( '--eval_interval' , default=10 , type=_lowerCAmelCase , help=( 'decay the selectivity of our secondary learner filter from' '1 standard deviation above average to 1 below average after 10 batches' ) , ) parser.add_argument( '--number' , default=100 , type=_lowerCAmelCase , help='The number of examples split to be used as objective_set/test_data' ) parser.add_argument( '--min_len' , default=1_026 , type=_lowerCAmelCase , help='The minimum length of the article to be used as objective set' ) parser.add_argument( '--secondary_learner_max_epochs' , default=15 , type=_lowerCAmelCase , help='number of epochs to train secondary learner' ) parser.add_argument('--trim' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='truncate the example if it exceeds context length' ) parser.add_argument( '--threshold' , default=1.0 , type=_lowerCAmelCase , help=( 'The threshold value used by secondary learner to filter the train_data and allow only' ' informative data as input to the model' ) , ) parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=_lowerCAmelCase , help='finetuned_model_name' ) parser.add_argument( '--recopy_model' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=_lowerCAmelCase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , ) # Load train data for secondary learner __lowercase =joblib.load('data/IGF_values.jbl' ) # Train secondary learner __lowercase =training_secondary_learner( _lowerCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , ) # load pretrained gpt2 model __lowercase =GPTaLMHeadModel.from_pretrained('gpt2' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model __lowercase , __lowercase =generate_datasets( context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1_026 , trim=_lowerCAmelCase ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=_lowerCAmelCase , secondary_learner=_lowerCAmelCase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , ) 
if __name__ == "__main__": main()
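# The comment inside `finetune` above ("decay the selectivity of our secondary
# learner filter from 1 standard deviation above average to 1 below average
# after 10 batches") is easier to see in isolation. This is an illustrative
# sketch only, not the exact schedule the script implements:
import numpy as np


def ig_threshold(observed_qs, batch_idx, warmup=10):
    """Linearly decay the IG(X) acceptance threshold from mean+std to mean-std."""
    mean, std = float(np.mean(observed_qs)), float(np.std(observed_qs))
    frac = min(batch_idx / warmup, 1.0)  # 0 -> 1 over the first `warmup` batches
    return mean + std - 2.0 * std * frac


if __name__ == "__main__":
    qs = [0.2, 0.5, 0.1, 0.4, 0.3]
    for b in range(12):
        print(b, round(ig_threshold(qs, b), 3))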
166
0
'''simple docstring''' import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets logger = datasets.logging.get_logger(__name__) _CITATION = """\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\", author = \"Moosavi, Nafise Sadat and Strube, Michael\", booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\", month = aug, year = \"2016\", address = \"Berlin, Germany\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/P16-1060\", doi = \"10.18653/v1/P16-1060\", pages = \"632--642\", } """ _DESCRIPTION = """\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only works with the CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *.
The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identify the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite were added by @andreasvc. Parsing of CoNLL files was developed by Leo Born. """ _KWARGS_DESCRIPTION = """ Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation). See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word references to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation). See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one are considered singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting 'keep_singletons=False', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs. min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. Returns: 'mentions': mentions 'muc': MUC metric [Vilain et al, 1995] 'bcub': B-cubed [Bagga and Baldwin, 1998] 'ceafe': CEAFe [Luo et al., 2005] 'lea': LEA [Moosavi and Strube, 2016] 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric('coval') >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -', ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)', ...
'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)', ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -', ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -', ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {'mentions/recall': 1.0,[...] 'conll_score': 100.0} """ def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_="dummy_doc" ) -> int: _a : Optional[Any] = {doc: key_lines} _a : Tuple = {doc: sys_lines} _a : Optional[int] = {} _a : str = 0 _a : Tuple = 0 _a : int = 0 _a : List[Any] = 0 _a : Any = 0 _a : List[str] = 0 _a , _a : Any = reader.get_doc_mentions(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ ) key_singletons_num += singletons_num if NP_only or min_span: _a : List[str] = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ ) _a , _a : int = reader.get_doc_mentions(lowerCAmelCase__ , sys_doc_lines[doc] , lowerCAmelCase__ ) sys_singletons_num += singletons_num if NP_only or min_span: _a : List[Any] = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ ) if remove_nested: _a , _a : Dict = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _a , _a : int = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _a : List[Any] = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ ) _a : Union[str, Any] = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ ) _a : Union[str, Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( 'Number of removed nested coreferring mentions in the key ' f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( 'Number of resulting singleton clusters in the key ' f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ 'files, respectively' ) return doc_coref_infos def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int: _a : Optional[Any] = get_coref_infos(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) _a : Dict = {} _a : Optional[Any] = 0 _a : Tuple = 0 for name, metric in metrics: _a , _a , _a : str = evaluator.evaluate_documents(lowerCAmelCase__ , lowerCAmelCase__ , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {fa * 100:.2f}""" , ) if conll_subparts_num == 3: _a : List[Any] = (conll / 3) * 100 
logger.info(f"""CoNLL score: {conll:.2f}""" ) output_scores.update({'conll_score': conll} ) return output_scores def __lowerCamelCase ( lowerCAmelCase_ ) -> str: _a : Any = False for line in key_lines: if not line.startswith('#' ): if len(line.split() ) > 6: _a : Optional[Any] = line.split()[5] if not parse_col == "-": _a : Any = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __lowercase ( self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Sequence(datasets.Value('string' ) ), } ) ,codebase_urls=['https://github.com/ns-moosavi/coval'] ,reference_urls=[ 'https://github.com/ns-moosavi/coval', 'https://www.aclweb.org/anthology/P16-1060', 'http://www.conll.cemantix.org/2012/data.html', ] ,) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int=True ,_UpperCAmelCase : Dict=False ,_UpperCAmelCase : int=False ,_UpperCAmelCase : Tuple=False ): _a : Tuple = [ ('mentions', evaluator.mentions), ('muc', evaluator.muc), ('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe), ('lea', evaluator.lea), ] if min_span: _a : Optional[Any] = util.check_gold_parse_annotation(__lowerCAmelCase ) if not has_gold_parse: raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _a : Optional[Any] = evaluate( key_lines=__lowerCAmelCase ,sys_lines=__lowerCAmelCase ,metrics=__lowerCAmelCase ,NP_only=__lowerCAmelCase ,remove_nested=__lowerCAmelCase ,keep_singletons=__lowerCAmelCase ,min_span=__lowerCAmelCase ,) return score
360
'''simple docstring'''
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
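# To run the script above, export a personal access token first, e.g.:
#
#     USER_TOKEN=<your-token> python fetch_github_info.py
#
# (the module filename is a placeholder). The /user endpoint returns the
# authenticated user's profile as JSON, so fetch_github_info(USER_TOKEN)
# yields a dict with fields such as "login" and "name".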
107
0
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = args.log_outputs UpperCAmelCase__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric UpperCAmelCase__ = load_metric('wer' ) UpperCAmelCase__ = load_metric('cer' ) # compute metrics UpperCAmelCase__ = wer.compute(references=result['target'] , predictions=result['prediction'] ) UpperCAmelCase__ = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results UpperCAmelCase__ = f'''WER: {wer_result}\nCER: {cer_result}''' print(lowerCamelCase ) with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f: f.write(lowerCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ = f'''log_{dataset_id}_predictions.txt''' UpperCAmelCase__ = f'''log_{dataset_id}_targets.txt''' with open(lowerCamelCase , 'w' ) as p, open(lowerCamelCase , 'w' ) as t: # mapping function to write output def write_to_file(lowerCamelCase , lowerCamelCase ): p.write(f'''{i}''' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f'''{i}''' + '\n' ) t.write(batch['target'] + '\n' ) result.map(lowerCamelCase , with_indices=lowerCamelCase ) def a_ ( lowerCamelCase ): UpperCAmelCase__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ = re.sub(lowerCamelCase , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! UpperCAmelCase__ = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: UpperCAmelCase__ = ' '.join(text.split(lowerCamelCase ) ) return text def a_ ( lowerCamelCase ): # load dataset UpperCAmelCase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ = dataset.cast_column('audio' , Audio(sampling_rate=lowerCamelCase ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(lowerCamelCase ): UpperCAmelCase__ = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ = prediction['text'] UpperCAmelCase__ = normalize_text(batch['sentence'] ) return batch # run inference on all examples UpperCAmelCase__ = dataset.map(lowerCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(lowerCamelCase , lowerCamelCase ) if __name__ == "__main__": lowerCAmelCase__ : Dict = argparse.ArgumentParser() parser.add_argument( '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers' ) parser.add_argument( '--dataset', type=str, required=True, help='Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets', ) parser.add_argument( '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice' ) parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`') parser.add_argument( '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.' ) parser.add_argument( '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.' ) parser.add_argument( '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.' ) parser.add_argument( '--device', type=int, default=None, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.', ) lowerCAmelCase__ : Dict = parser.parse_args() main(args)
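# A quick, self-contained demonstration of the normalize_text step above
# (pure string work, safe to run standalone):
import re

sample = "Hello, WORLD! This is a\n\ntest…"
lowered = re.sub('[,?.!\-\;\:"“%‘”�—’…–]', "", sample.lower())
# collapsing all whitespace is roughly what the token_sequences_to_ignore loop does
print(" ".join(lowered.split()))  # -> "hello world this is a test"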
98
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A : List[str] = logging.get_logger(__name__) _A : List[str] = {"""vocab_file""": """spiece.model"""} _A : List[Any] = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), } } _A : str = { """google/bigbird-roberta-base""": 40_96, """google/bigbird-roberta-large""": 40_96, """google/bigbird-base-trivia-itc""": 40_96, } class a__ ( a_ ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] __lowerCAmelCase = [] def __init__( self , _a , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="[SEP]" , _a="[MASK]" , _a="[CLS]" , _a = None , **_a , ): lowercase : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token lowercase : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sep_token=_a , mask_token=_a , cls_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) lowercase : Optional[Any] = vocab_file lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) @property def __magic_name__ ( self ): return self.sp_model.get_piece_size() def __magic_name__ ( self ): lowercase : str = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): lowercase : Union[str, Any] = self.__dict__.copy() lowercase : Dict = None return state def __setstate__( self , _a ): lowercase : str = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowercase : List[str] = {} lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __magic_name__ ( self , _a ): return self.sp_model.encode(_a , out_type=_a ) def __magic_name__ ( self , _a ): return self.sp_model.piece_to_id(_a ) def __magic_name__ ( self , _a ): lowercase : Union[str, Any] = self.sp_model.IdToPiece(_a ) return token def __magic_name__ ( self , _a ): lowercase : List[Any] = [] lowercase : List[Any] = "" lowercase : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token lowercase : int = True lowercase : Union[str, Any] = [] else: current_sub_tokens.append(_a ) lowercase : int = False out_string += self.sp_model.decode(_a ) return out_string.strip() def __magic_name__ ( self , _a , _a = False , _a = None , _a = True , **_a , ): lowercase : int = kwargs.pop("use_source_tokenizer" , _a ) lowercase : Union[str, Any] = self.convert_ids_to_tokens(_a , skip_special_tokens=_a ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 lowercase : Any = [] lowercase : Dict = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) lowercase : int = [] sub_texts.append(_a ) else: current_sub_text.append(_a ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: lowercase : Dict = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(_a ) ) else: lowercase : Union[str, Any] = "".join(_a ) lowercase : int = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowercase : Union[str, Any] = self.clean_up_tokenization(_a ) return clean_text else: return text def __magic_name__ ( self , _a , _a = None ): if not os.path.isdir(_a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase : List[Any] = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , "wb" ) as fi: lowercase : int = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def __magic_name__ ( self , _a , _a = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase : List[str] = [self.cls_token_id] lowercase : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __magic_name__ ( self , _a , _a = None , _a = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] def __magic_name__ ( self , _a , _a = None ): lowercase : Tuple = [self.sep_token_id] lowercase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
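# A minimal round trip with the tokenizer above, using one of the checkpoints
# listed in PRETRAINED_VOCAB_FILES_MAP:
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer("SentencePiece handles the whitespace itself.").input_ids
print(ids)
print(tokenizer.decode(ids, skip_special_tokens=True))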
202
0
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Build the vector that points from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Cross product of two 3D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check if a vector is the zero vector up to `accuracy` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is zero."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
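# Worked example for the helpers above: (0,0,0), (1,1,1), (2,2,2) lie on one
# line; replacing the last point with (1,2,3) gives the nonzero cross product
# (1, -2, 1), so the test fails.
print(are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0)))  # True
print(are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (1.0, 2.0, 3.0)))  # False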
360
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __snake_case : Any = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[int] = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : List[Any] = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __snake_case : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
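# From user code the lazy structure above is invisible; the heavy modules are
# only imported when one of their names is first accessed:
#
#     from transformers import ASTConfig                  # cheap, config only
#     from transformers import ASTForAudioClassification  # triggers the torch import
#
# When torch (or the speech extras) is unavailable, the except branches above
# simply leave those names out of _import_structure.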
136
0
"""simple docstring"""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Check whether sqrt(4k + 1) / 2 + 1 / 2 is an exact power of two."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the candidate at which the running proportion of perfect partitions drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
33
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase: Union[str, Any] = { "configuration_bridgetower": [ "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP", "BridgeTowerConfig", "BridgeTowerTextConfig", "BridgeTowerVisionConfig", ], "processing_bridgetower": ["BridgeTowerProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase: Dict = ["BridgeTowerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase: int = [ "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST", "BridgeTowerForContrastiveLearning", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", "BridgeTowerModel", "BridgeTowerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys _lowercase: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
227
0
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case : def __init__( self : int , _snake_case : str , _snake_case : Optional[int]=13 , _snake_case : Dict=7 , _snake_case : str=True , _snake_case : Optional[Any]=True , _snake_case : List[Any]=True , _snake_case : int=True , _snake_case : Any=True , _snake_case : Tuple=False , _snake_case : Optional[int]=False , _snake_case : List[Any]=False , _snake_case : Any=2 , _snake_case : Union[str, Any]=99 , _snake_case : int=0 , _snake_case : List[Any]=32 , _snake_case : Optional[int]=5 , _snake_case : List[Any]=4 , _snake_case : Any=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : int=512 , _snake_case : int=2 , _snake_case : List[str]=0.0_2 , _snake_case : Any=2 , _snake_case : int=4 , _snake_case : List[str]="last" , _snake_case : Dict=True , _snake_case : Optional[Any]=None , _snake_case : Optional[Any]=0 , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_lengths UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = gelu_activation UpperCAmelCase_ = sinusoidal_embeddings UpperCAmelCase_ = causal UpperCAmelCase_ = asm UpperCAmelCase_ = n_langs UpperCAmelCase_ = vocab_size UpperCAmelCase_ = n_special UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = summary_type UpperCAmelCase_ = use_proj UpperCAmelCase_ = scope UpperCAmelCase_ = bos_token_id def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length]) UpperCAmelCase_ = None if self.use_input_lengths: UpperCAmelCase_ = ( ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase_ = None if self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) UpperCAmelCase_ = ids_tensor([self.batch_size] , 2).float() UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices) UpperCAmelCase_ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) 
def lowerCamelCase ( self : Tuple): """simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowerCamelCase ( self : str , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : str , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Dict , _snake_case : List[str] , ): """simple docstring""" UpperCAmelCase_ = XLMModel(config=_a) model.to(_a) model.eval() UpperCAmelCase_ = model(_a , lengths=_a , langs=_a) UpperCAmelCase_ = model(_a , langs=_a) UpperCAmelCase_ = model(_a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCamelCase ( self : Dict , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Optional[Any] , ): """simple docstring""" UpperCAmelCase_ = XLMWithLMHeadModel(_a) model.to(_a) model.eval() UpperCAmelCase_ = model(_a , token_type_ids=_a , labels=_a) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Dict , ): """simple docstring""" UpperCAmelCase_ = XLMForQuestionAnsweringSimple(_a) model.to(_a) model.eval() UpperCAmelCase_ = model(_a) UpperCAmelCase_ = model(_a , start_positions=_a , end_positions=_a) UpperCAmelCase_ = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def lowerCamelCase ( self : int , _snake_case : List[Any] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Any , ): """simple docstring""" UpperCAmelCase_ = XLMForQuestionAnswering(_a) model.to(_a) model.eval() UpperCAmelCase_ = model(_a) UpperCAmelCase_ = model( _a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , p_mask=_a , ) UpperCAmelCase_ = model( _a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , ) ((UpperCAmelCase_ ) , ) = result_with_labels.to_tuple() UpperCAmelCase_ = model(_a , start_positions=_a , end_positions=_a) ((UpperCAmelCase_ ) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , ()) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape , 
(self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,)) def lowerCamelCase ( self : str , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Any , _snake_case : List[str] , ): """simple docstring""" UpperCAmelCase_ = XLMForSequenceClassification(_a) model.to(_a) model.eval() UpperCAmelCase_ = model(_a) UpperCAmelCase_ = model(_a , labels=_a) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[int] , _snake_case : int , _snake_case : Tuple , _snake_case : int , _snake_case : Dict , _snake_case : str , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Dict , ): """simple docstring""" UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = XLMForTokenClassification(_a) model.to(_a) model.eval() UpperCAmelCase_ = model(_a , attention_mask=_a , labels=_a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCamelCase ( self : Dict , _snake_case : Any , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Dict , ): """simple docstring""" UpperCAmelCase_ = self.num_choices UpperCAmelCase_ = XLMForMultipleChoice(config=_a) model.to(_a) model.eval() UpperCAmelCase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() UpperCAmelCase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() UpperCAmelCase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() UpperCAmelCase_ = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class __snake_case ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): UpperCAmelCase__ : Optional[Any] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCAmelCase__ : List[Any] = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCAmelCase__ : Dict = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': 
XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def lowerCamelCase ( self : Optional[Any] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : str): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''') ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Optional[Any]=False): """simple docstring""" UpperCAmelCase_ = super()._prepare_for_class(_a , _a , return_labels=_a) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": UpperCAmelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_a) UpperCAmelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_a) return inputs_dict def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = XLMModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_a , emb_dim=37) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_a) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_a) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_a) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_a) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_a) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_a) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_a) def lowerCamelCase ( self : List[Any] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Any , _snake_case : Any=False , _snake_case : Dict=1): """simple docstring""" self.assertIsInstance(_a , _a) self.assertListEqual( [isinstance(_a , _a) for iter_attentions in attentions] , [True] * len(_a)) self.assertEqual(len(_a) , (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(_a): # adds PAD dummy token UpperCAmelCase_ = min_length + idx + 1 UpperCAmelCase_ = min_length + idx + 1 UpperCAmelCase_ = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for 
layer_attention in iter_attentions] , [expected_shape] * len(_a)) def lowerCamelCase ( self : List[str] , _snake_case : Any , _snake_case : List[str] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Optional[Any]=1): """simple docstring""" self.assertIsInstance(_a , _a) self.assertListEqual( [isinstance(_a , _a) for iter_hidden_states in hidden_states] , [True] * len(_a) , ) self.assertEqual(len(_a) , (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(_a): # adds PAD dummy token UpperCAmelCase_ = min_length + idx + 1 UpperCAmelCase_ = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_a) , ) pass @slow def lowerCamelCase ( self : Tuple): """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = XLMModel.from_pretrained(_a) self.assertIsNotNone(_a) @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''') model.to(_a) UpperCAmelCase_ = torch.tensor([[14, 447]] , dtype=torch.long , device=_a) # the president UpperCAmelCase_ = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference UpperCAmelCase_ = model.generate(_a , do_sample=_a) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _a)
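# The integration test above translates directly into standalone usage (the
# test itself notes this checkpoint is not really built for auto-regressive
# generation, so expect repetitive output):
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")

input_ids = tokenizer("the president", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))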
371
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Tuple = ["DeiTFeatureExtractor"] snake_case_ : List[str] = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : List[Any] = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Dict = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
7
0
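The DeiT `__init__.py` above builds an `_import_structure` map and hands it to `_LazyModule`, so the heavy torch/TF submodules are only imported when one of their exported names is first touched. As a rough, self-contained sketch of that pattern (the class below is a simplified stand-in written for this note, not the actual `transformers._LazyModule`; the demo maps names onto standard-library modules so it runs on its own):

import importlib
import types


class LazyModule(types.ModuleType):
    """Simplified stand-in for the lazy-import pattern: resolve names on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [exported names]} into {exported name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, item):
        if item in self._name_to_module:
            module = importlib.import_module(self._name_to_module[item])
            value = getattr(module, item)
            setattr(self, item, value)  # cache: later lookups bypass __getattr__
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")


# Demo: each stdlib module is imported only when its name is first used.
lazy = LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(2))          # imports math here
print(lazy.dumps({"a": 1}))  # imports json here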
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
226
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: a pipe-separated string becomes a lowercase list
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
226
1
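A small, runnable check of the `pos_att_type` backwards-compatibility branch in the DeBERTa-v2 config above: a pipe-separated string such as "c2p|p2c" is normalized into a lowercase list, while lists and None pass through unchanged. The helper name is ours, extracted here purely for illustration:

# Mirrors the normalization done inside DebertaV2Config.__init__ (helper name is ours).
def parse_pos_att_type(pos_att_type):
    if type(pos_att_type) == str:
        return [x.strip() for x in pos_att_type.lower().split("|")]
    return pos_att_type


assert parse_pos_att_type("C2P|P2C") == ["c2p", "p2c"]
assert parse_pos_att_type(["c2p", "p2c"]) == ["c2p", "p2c"]
assert parse_pos_att_type(None) is None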
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
15
import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():

example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X"  # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
"""
)


CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}


README_CORRECT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

README_CORRECT_FOUR_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}


README_EMPTY_YAML = """\
---
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)

README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)

README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."

README_MISSING_TEXT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."

README_NONE_SUBSECTION = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
"""

EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."

README_MISSING_SUBSECTION = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."

README_MISSING_CONTENT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""

EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."

README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---

## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."

README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."

README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."

README_EMPTY = ""

EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."

README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
15
1
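The ViltProcessor sample above merges tokenizer output with the image processor's pixel_values / pixel_mask into a single BatchEncoding. A hedged usage sketch of that combined call follows; the checkpoint and image URL are illustrative choices for this note, not part of the sample itself:

import requests
from PIL import Image
from transformers import ViltProcessor

# Illustrative checkpoint and image; any ViLT checkpoint exercises the same code path.
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

encoding = processor(image, "How many cats are there?", return_tensors="pt")
# Tokenizer fields plus pixel_values / pixel_mask contributed by the image processor:
print(sorted(encoding.keys()))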