code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_config(__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) AutoTokenizer.from_pretrained(__UpperCamelCase ).save_pretrained(__UpperCamelCase ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
721
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class A__ ( __magic_name__ ): lowercase = (DDPMParallelScheduler,) def _lowerCamelCase ( self : str , **a : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : str = { 'num_train_timesteps': 1_000, 'beta_start': 0.0_0_0_1, 'beta_end': 0.0_2, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**a ) return config def _lowerCamelCase ( self : Tuple ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=a ) def _lowerCamelCase ( self : int ): '''simple docstring''' for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=a , beta_end=a ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=a ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=a ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=a ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' self.check_over_configs(thresholding=a ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=a , prediction_type=a , sample_max_value=a , ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=a ) def _lowerCamelCase ( self : Any ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=a ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : List[Any] = self.scheduler_classes[0] lowerCAmelCase__ : Any 
= self.get_scheduler_config() lowerCAmelCase__ : List[str] = scheduler_class(**a ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5 def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Any = self.scheduler_classes[0] lowerCAmelCase__ : Any = self.get_scheduler_config() lowerCAmelCase__ : int = scheduler_class(**a ) lowerCAmelCase__ : str = len(a ) lowerCAmelCase__ : Tuple = self.dummy_model() lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter lowerCAmelCase__ : int = self.dummy_sample_deter + 0.1 lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter - 0.1 lowerCAmelCase__ : Tuple = samplea.shape[0] lowerCAmelCase__ : List[Any] = torch.stack([samplea, samplea, samplea] , dim=0 ) lowerCAmelCase__ : Optional[Any] = torch.arange(a )[0:3, None].repeat(1 , a ) lowerCAmelCase__ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) lowerCAmelCase__ : Tuple = scheduler.batch_step_no_noise(a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) lowerCAmelCase__ : str = torch.sum(torch.abs(a ) ) lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2 assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3 def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : str = self.scheduler_classes[0] lowerCAmelCase__ : List[Any] = self.get_scheduler_config() lowerCAmelCase__ : Dict = scheduler_class(**a ) lowerCAmelCase__ : str = len(a ) lowerCAmelCase__ : Any = self.dummy_model() lowerCAmelCase__ : int = self.dummy_sample_deter lowerCAmelCase__ : Tuple = torch.manual_seed(0 ) for t in reversed(range(a ) ): # 1. predict noise residual lowerCAmelCase__ : Optional[Any] = model(a , a ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase__ : int = scheduler.step(a , a , a , generator=a ).prev_sample lowerCAmelCase__ : List[str] = pred_prev_sample lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(a ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3 def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : str = self.scheduler_classes[0] lowerCAmelCase__ : Dict = self.get_scheduler_config(prediction_type='v_prediction' ) lowerCAmelCase__ : int = scheduler_class(**a ) lowerCAmelCase__ : str = len(a ) lowerCAmelCase__ : Optional[int] = self.dummy_model() lowerCAmelCase__ : List[str] = self.dummy_sample_deter lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 ) for t in reversed(range(a ) ): # 1. predict noise residual lowerCAmelCase__ : List[Any] = model(a , a ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase__ : Optional[int] = scheduler.step(a , a , a , generator=a ).prev_sample lowerCAmelCase__ : str = pred_prev_sample lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) ) lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3 def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0] lowerCAmelCase__ : Any = self.get_scheduler_config() lowerCAmelCase__ : Optional[int] = scheduler_class(**a ) lowerCAmelCase__ : Optional[Any] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=a ) lowerCAmelCase__ : List[Any] = scheduler.timesteps for i, timestep in enumerate(a ): if i == len(a ) - 1: lowerCAmelCase__ : Tuple = -1 else: lowerCAmelCase__ : Dict = timesteps[i + 1] lowerCAmelCase__ : str = scheduler.previous_timestep(a ) lowerCAmelCase__ : int = prev_t.item() self.assertEqual(a , a ) def _lowerCamelCase ( self : 
Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase__ : Optional[int] = self.get_scheduler_config() lowerCAmelCase__ : Optional[Any] = scheduler_class(**a ) lowerCAmelCase__ : str = [100, 87, 50, 51, 0] with self.assertRaises(a , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=a ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase__ : str = self.get_scheduler_config() lowerCAmelCase__ : Optional[int] = scheduler_class(**a ) lowerCAmelCase__ : str = [100, 87, 50, 1, 0] lowerCAmelCase__ : int = len(a ) with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=a , timesteps=a ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.scheduler_classes[0] lowerCAmelCase__ : Dict = self.get_scheduler_config() lowerCAmelCase__ : Optional[int] = scheduler_class(**a ) lowerCAmelCase__ : str = [scheduler.config.num_train_timesteps] with self.assertRaises( a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=a )
69
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase__ = logging.get_logger(__name__) if is_vision_available(): import PIL class A__ ( _lowercase ): lowercase = ['''pixel_values'''] def __init__( self : Union[str, Any] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BICUBIC , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : bool = True , **a : Optional[Any] , ): '''simple docstring''' super().__init__(**A_ ) lowerCAmelCase__ : int = size if size is not None else {'shortest_edge': 224} lowerCAmelCase__ : int = get_size_dict(A_ , default_to_square=A_ ) lowerCAmelCase__ : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowerCAmelCase__ : int = get_size_dict(A_ , default_to_square=A_ , param_name='crop_size' ) lowerCAmelCase__ : Tuple = do_resize lowerCAmelCase__ : Union[str, Any] = size lowerCAmelCase__ : Dict = resample lowerCAmelCase__ : Dict = do_center_crop lowerCAmelCase__ : Dict = crop_size lowerCAmelCase__ : Optional[Any] = do_rescale lowerCAmelCase__ : int = rescale_factor lowerCAmelCase__ : Optional[Any] = do_normalize lowerCAmelCase__ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCAmelCase__ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD lowerCAmelCase__ : Optional[Any] = do_convert_rgb 
def _lowerCamelCase ( self : List[str] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowerCAmelCase__ : Any = get_resize_output_image_size(A_ , size=size['shortest_edge'] , default_to_square=A_ ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def _lowerCamelCase ( self : str , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , ): '''simple docstring''' lowerCAmelCase__ : int = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ ) def _lowerCamelCase ( self : Optional[int] , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ): '''simple docstring''' return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def _lowerCamelCase ( self : List[str] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ): '''simple docstring''' return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def _lowerCamelCase ( self : Dict , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : int = None , a : bool = None , a : float = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : bool = None , a : Optional[Union[str, TensorType]] = None , a : 
Optional[ChannelDimension] = ChannelDimension.FIRST , **a : Dict , ): '''simple docstring''' lowerCAmelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase__ : Union[str, Any] = size if size is not None else self.size lowerCAmelCase__ : Any = get_size_dict(A_ , param_name='size' , default_to_square=A_ ) lowerCAmelCase__ : Dict = resample if resample is not None else self.resample lowerCAmelCase__ : str = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase__ : Any = crop_size if crop_size is not None else self.crop_size lowerCAmelCase__ : Tuple = get_size_dict(A_ , param_name='crop_size' , default_to_square=A_ ) lowerCAmelCase__ : Any = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase__ : str = image_mean if image_mean is not None else self.image_mean lowerCAmelCase__ : Tuple = image_std if image_std is not None else self.image_std lowerCAmelCase__ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCAmelCase__ : str = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' 
) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCAmelCase__ : Any = [convert_to_rgb(A_ ) for image in images] # All transformations expect numpy arrays. lowerCAmelCase__ : Dict = [to_numpy_array(A_ ) for image in images] if do_resize: lowerCAmelCase__ : Union[str, Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_center_crop: lowerCAmelCase__ : List[str] = [self.center_crop(image=A_ , size=A_ ) for image in images] if do_rescale: lowerCAmelCase__ : str = [self.rescale(image=A_ , scale=A_ ) for image in images] if do_normalize: lowerCAmelCase__ : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images] lowerCAmelCase__ : List[str] = [to_channel_dimension_format(A_ , A_ ) for image in images] lowerCAmelCase__ : List[Any] = {'pixel_values': images} return BatchFeature(data=A_ , tensor_type=A_ )
700
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A__ ( __magic_name__ ): lowercase = ['image_processor', 'tokenizer'] lowercase = 'LayoutLMv3ImageProcessor' lowercase = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__( self : Optional[int] , a : Union[str, Any]=None , a : Optional[Any]=None , **a : str ): '''simple docstring''' lowerCAmelCase__ : List[str] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , a , ) lowerCAmelCase__ : int = kwargs.pop('feature_extractor' ) lowerCAmelCase__ : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' 
) super().__init__(a , a ) def __call__( self : List[Any] , a : List[Any] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : str , ): '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' 
) # first, apply the image processor lowerCAmelCase__ : List[str] = self.image_processor(images=a , return_tensors=a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(a , a ): lowerCAmelCase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCAmelCase__ : List[str] = features['words'] lowerCAmelCase__ : List[Any] = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , ) # add pixel values lowerCAmelCase__ : Tuple = features.pop('pixel_values' ) if return_overflowing_tokens is True: lowerCAmelCase__ : List[str] = self.get_overflowing_images(a , encoded_inputs['overflow_to_sample_mapping'] ) lowerCAmelCase__ : List[str] = images return encoded_inputs def _lowerCamelCase ( self : Any , a : List[str] , a : int ): '''simple docstring''' lowerCAmelCase__ : int = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(a ) != len(a ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' f''' {len(a )} and {len(a )}''' ) return images_with_overflow def _lowerCamelCase ( self : Union[str, Any] , *a : Optional[Any] , **a : List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*a , **a ) def _lowerCamelCase ( self : Tuple , *a : List[str] , **a : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*a , **a ) @property def _lowerCamelCase ( self : int ): '''simple 
docstring''' return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , ) return self.image_processor_class @property def _lowerCamelCase ( self : Dict ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , ) return self.image_processor
69
0
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int: lowerCAmelCase__ : Dict = [False] * len(__SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Union[str, Any] = [-1] * len(__SCREAMING_SNAKE_CASE ) def dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowerCAmelCase__ : int = True lowerCAmelCase__ : List[str] = c for u in graph[v]: if not visited[u]: dfs(__SCREAMING_SNAKE_CASE , 1 - c ) for i in range(len(__SCREAMING_SNAKE_CASE ) ): if not visited[i]: dfs(__SCREAMING_SNAKE_CASE , 0 ) for i in range(len(__SCREAMING_SNAKE_CASE ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph lowerCamelCase__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
701
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class A__ ( __magic_name__ ): def __init__( self : List[str] , a : Optional[Any] , a : int=13 , a : str=7 , a : Any=True , a : List[str]=True , a : Any=False , a : List[Any]=True , a : List[str]=99 , a : Optional[Any]=32 , a : List[str]=5 , a : List[Any]=4 , a : List[Any]=64 , a : List[Any]="gelu" , a : List[Any]=0.1 , a : List[Any]=0.1 , a : int=512 , a : Tuple=16 , a : List[str]=2 , a : int=0.0_2 , a : Union[str, Any]=3 , a : Any=4 , a : Union[str, Any]=None , a : Union[str, Any]=2 , a : List[str]=2 , a : int=2 , a : Dict=2 , a : List[str]=4 , a : str=1 , ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = parent lowerCAmelCase__ : int = batch_size lowerCAmelCase__ : str = seq_length lowerCAmelCase__ : Tuple = is_training lowerCAmelCase__ : List[str] = use_input_mask lowerCAmelCase__ : Optional[int] = use_token_type_ids lowerCAmelCase__ : Any = use_labels lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : str = hidden_size lowerCAmelCase__ : str = num_hidden_layers lowerCAmelCase__ : List[str] = num_attention_heads lowerCAmelCase__ : int = intermediate_size lowerCAmelCase__ : Optional[int] = hidden_act lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob lowerCAmelCase__ : Union[str, Any] = 
max_position_embeddings lowerCAmelCase__ : Optional[int] = type_vocab_size lowerCAmelCase__ : Dict = type_sequence_label_size lowerCAmelCase__ : Optional[int] = initializer_range lowerCAmelCase__ : List[Any] = num_labels lowerCAmelCase__ : Any = num_choices lowerCAmelCase__ : str = scope lowerCAmelCase__ : Any = q_groups lowerCAmelCase__ : Any = k_groups lowerCAmelCase__ : Union[str, Any] = v_groups lowerCAmelCase__ : int = post_attention_groups lowerCAmelCase__ : str = intermediate_groups lowerCAmelCase__ : Union[str, Any] = output_groups def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : Tuple = None if self.use_input_mask: lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : List[Any] = None lowerCAmelCase__ : List[str] = None lowerCAmelCase__ : Tuple = None if self.use_labels: lowerCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self : str ): '''simple docstring''' return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , 
post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : List[str] , a : Any , a : Optional[int] , a : str , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = SqueezeBertModel(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : List[str] = model(a , a ) lowerCAmelCase__ : Any = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : int , a : Union[str, Any] , a : Tuple , a : Any ): '''simple docstring''' lowerCAmelCase__ : List[str] = SqueezeBertForMaskedLM(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Any = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : str , a : Optional[Any] , a : str , a : int ): '''simple docstring''' lowerCAmelCase__ : Any = SqueezeBertForQuestionAnswering(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : List[str] = model( a , attention_mask=a , start_positions=a , end_positions=a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self : Tuple , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : str , a : str , a : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.num_labels lowerCAmelCase__ : Dict = SqueezeBertForSequenceClassification(a ) model.to(a ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self : Any , a : int , a : 
Any , a : Dict , a : Any , a : Tuple , a : Tuple ): '''simple docstring''' lowerCAmelCase__ : str = self.num_labels lowerCAmelCase__ : Dict = SqueezeBertForTokenClassification(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCamelCase ( self : str , a : Optional[int] , a : List[Any] , a : int , a : List[Any] , a : Union[str, Any] , a : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.num_choices lowerCAmelCase__ : Union[str, Any] = SqueezeBertForMultipleChoice(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : List[str] = model( a , attention_mask=a , labels=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.prepare_config_and_inputs() ((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) : List[Any] = config_and_inputs lowerCAmelCase__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) lowercase = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 
'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = True lowercase = False def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = SqueezeBertModelTester(self ) lowerCAmelCase__ : Dict = ConfigTester(self , config_class=a , dim=37 ) def _lowerCamelCase ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*a ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*a ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*a ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*a ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a ) @slow def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : Optional[int] = 
SqueezeBertModel.from_pretrained(a ) self.assertIsNotNone(a ) @require_sentencepiece @require_tokenizers @require_torch class A__ ( unittest.TestCase ): @slow def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : int = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' ) lowerCAmelCase__ : str = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] ) lowerCAmelCase__ : Any = model(a )[0] lowerCAmelCase__ : Tuple = torch.Size((1, 3) ) self.assertEqual(output.shape , a ) lowerCAmelCase__ : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] ) self.assertTrue(torch.allclose(a , a , atol=1E-4 ) )
69
0
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: lowerCAmelCase__ : List[str] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def lowerCAmelCase__ ( ) -> Any: print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
702
lowerCamelCase__ = """Alexander Joslin""" import operator as op from .stack import Stack def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int: lowerCAmelCase__ : Union[str, Any] = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub} lowerCAmelCase__ : Stack[int] = Stack() lowerCAmelCase__ : Stack[str] = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(SCREAMING_SNAKE_CASE_ ) ) elif i in operators: # RULE 2 operator_stack.push(SCREAMING_SNAKE_CASE_ ) elif i == ")": # RULE 4 lowerCAmelCase__ : List[Any] = operator_stack.peek() operator_stack.pop() lowerCAmelCase__ : List[str] = operand_stack.peek() operand_stack.pop() lowerCAmelCase__ : List[Any] = operand_stack.peek() operand_stack.pop() lowerCAmelCase__ : Tuple = operators[opr](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) operand_stack.push(SCREAMING_SNAKE_CASE_ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
69
0
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
703
import numpy class A__ : def __init__( self : Tuple , a : numpy.ndarray , a : numpy.ndarray ): '''simple docstring''' lowerCAmelCase__ : int = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. lowerCAmelCase__ : Dict = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. lowerCAmelCase__ : List[str] = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. lowerCAmelCase__ : List[Any] = numpy.random.rand(3 , 1 ) # Real output values provided. lowerCAmelCase__ : str = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. lowerCAmelCase__ : List[Any] = numpy.zeros(output_array.shape ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : str = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. lowerCAmelCase__ : Tuple = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. 
lowerCAmelCase__ : Any = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) lowerCAmelCase__ : Optional[Any] = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) lowerCAmelCase__ : int = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def _lowerCamelCase ( self : Optional[int] , a : numpy.ndarray , a : int , a : bool ): '''simple docstring''' for iteration in range(1 , iterations + 1 ): lowerCAmelCase__ : Any = self.feedforward() self.back_propagation() if give_loss: lowerCAmelCase__ : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) ) print(f'''Iteration {iteration} Loss: 
{loss}''' ) def _lowerCamelCase ( self : Optional[Any] , a : numpy.ndarray ): '''simple docstring''' lowerCAmelCase__ : Dict = input_arr lowerCAmelCase__ : Any = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) lowerCAmelCase__ : int = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) lowerCAmelCase__ : List[Any] = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray: return 1 / (1 + numpy.exp(-value )) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray: return (value) * (1 - (value)) def lowerCAmelCase__ ( ) -> int: lowerCAmelCase__ : Any = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. lowerCAmelCase__ : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. lowerCAmelCase__ : List[str] = TwoHiddenLayerNeuralNetwork( input_array=SCREAMING_SNAKE_CASE_ , output_array=SCREAMING_SNAKE_CASE_ ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=SCREAMING_SNAKE_CASE_ , iterations=10 , give_loss=SCREAMING_SNAKE_CASE_ ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
69
0
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: lowerCAmelCase__ : int = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 5_000 ) -> int: lowerCAmelCase__ : List[Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )] for i, pentagonal_i in enumerate(_lowerCamelCase ): for j in range(_lowerCamelCase , len(_lowerCamelCase ) ): lowerCAmelCase__ : Any = pentagonal_nums[j] lowerCAmelCase__ : int = pentagonal_i + pentagonal_j lowerCAmelCase__ : Optional[Any] = pentagonal_j - pentagonal_i if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
704
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ): '''simple docstring''' lowerCAmelCase__ : str = parent lowerCAmelCase__ : Union[str, Any] = batch_size lowerCAmelCase__ : List[str] = image_size lowerCAmelCase__ : Optional[Any] = patch_size lowerCAmelCase__ : Tuple = num_channels lowerCAmelCase__ : Optional[int] = embed_dim lowerCAmelCase__ : Tuple = depths lowerCAmelCase__ : List[str] = num_heads lowerCAmelCase__ : List[Any] = window_size lowerCAmelCase__ : Any = mlp_ratio lowerCAmelCase__ : Optional[Any] = qkv_bias lowerCAmelCase__ : Any = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : int = drop_path_rate lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : int = 
use_absolute_embeddings lowerCAmelCase__ : List[str] = patch_norm lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = initializer_range lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : List[Any] = scope lowerCAmelCase__ : Dict = use_labels lowerCAmelCase__ : List[Any] = type_sequence_label_size lowerCAmelCase__ : Optional[Any] = encoder_stride def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : Optional[Any] = None if self.use_labels: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : int = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self : List[str] ): '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = model(a ) lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ): '''simple docstring''' lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : str = model(a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ : List[str] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ): '''simple docstring''' lowerCAmelCase__ : str = self.type_sequence_label_size lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a ) model.to(a ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs lowerCAmelCase__ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowercase = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] 
= SwinvaModelTester(self ) lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' ) def _lowerCamelCase ( self : Any ): '''simple docstring''' pass @unittest.skip(reason='Swinv2 does not use inputs_embeds' ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Optional[int] = model_class(a ) lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Tuple = [*signature.parameters.keys()] lowerCAmelCase__ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _lowerCamelCase 
( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : str = False lowerCAmelCase__ : List[Any] = True lowerCAmelCase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Dict = outputs.attentions lowerCAmelCase__ : Dict = len(self.model_tester.depths ) self.assertEqual(len(a ) , a ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : Optional[int] = config.window_size**2 lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) lowerCAmelCase__ : Tuple = len(a ) # Check attention is always last and order is fine lowerCAmelCase__ : str = True lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) if hasattr(self.model_tester , 'num_hidden_states_types' ): lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCAmelCase__ : Any = 2 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowerCAmelCase__ : Dict = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, 
window_size_squared] , ) def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ): '''simple docstring''' lowerCAmelCase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.hidden_states lowerCAmelCase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swinv2 has a different seq_length lowerCAmelCase__ : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(a ) , a ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape lowerCAmelCase__ : List[str] = ( reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : List[str] = True 
self.check_hidden_states_output(a , a , a , a ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Any = 3 lowerCAmelCase__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase__ : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase__ : str = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a ) self.assertIsNotNone(a ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = _config_zero_init(a ) for model_class in 
self.all_model_classes: lowerCAmelCase__ : int = model_class(config=a ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class A__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to( a ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**a ) # verify the logits lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
69
0
from __future__ import annotations def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict: lowerCAmelCase__ : Dict = len(SCREAMING_SNAKE_CASE_ ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(SCREAMING_SNAKE_CASE_ ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. 
if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Dict: lowerCAmelCase__ : list[list[str]] = [] depth_first_search([] , [] , [] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Print all the boards for board in boards: for column in board: print(SCREAMING_SNAKE_CASE_ ) print('' ) print(len(SCREAMING_SNAKE_CASE_ ) , 'solutions were found.' ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
705
from itertools import permutations def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool: if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False lowerCAmelCase__ : str = [7, 11, 13, 17] for i, test in enumerate(SCREAMING_SNAKE_CASE_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 10 ) -> int: return sum( int(''.join(map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) for num in permutations(range(SCREAMING_SNAKE_CASE_ ) ) if is_substring_divisible(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
69
0
from __future__ import annotations import string from itertools import cycle, product from pathlib import Path lowerCamelCase__ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase] lowerCamelCase__ = {ord(char) for char in VALID_CHARS} lowerCamelCase__ = ["""the""", """be""", """to""", """of""", """and""", """in""", """that""", """have"""] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: lowerCAmelCase__ : str = "" lowerCAmelCase__ : int lowerCAmelCase__ : int lowerCAmelCase__ : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): lowerCAmelCase__ : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: lowerCAmelCase__ : list[str] = [] for key in product(snake_case__ , repeat=3 ): lowerCAmelCase__ : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: return [possible for possible in possibles if common_word in possible.lower()] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "p059_cipher.txt" ) -> int: lowerCAmelCase__ : list[int] lowerCAmelCase__ : list[str] lowerCAmelCase__ : str lowerCAmelCase__ : str lowerCAmelCase__ : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding='utf-8' ) lowerCAmelCase__ : List[Any] = [int(snake_case__ ) for number in data.strip().split(',' )] lowerCAmelCase__ : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: lowerCAmelCase__ : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break lowerCAmelCase__ : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if 
__name__ == "__main__": print(F"""{solution() = }""")
706
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ConsistencyModelPipeline using tiny dummy UNets.

    NOTE(review): class names were restored here — the obfuscated source named
    both test classes ``A__``, so the second shadowed the first and its tests
    were never collected.
    """

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        """Tiny unconditional UNet checkpoint for fast tests."""
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        """Tiny class-conditional UNet checkpoint for fast tests."""
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Build the minimal component dict (unet + scheduler) for the pipeline."""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; MPS needs a CPU generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against the full pretrained consistency-model checkpoints."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        """Pre-generate latents so runs with/without flash attention are comparable."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
69
0
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Row-wise entropy of ``softmax(x)`` computed without materializing the softmax.

    Uses H = log(sum exp(x_i)) - (sum x_i*exp(x_i)) / (sum exp(x_i)); ``x`` is a
    2D batch of logits and the reduction is over dim=1.
    """
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    """BERT encoder augmented with one "highway" early-exit classifier per layer."""

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # -1 disables early exit (entropy can never be below it).
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set a single entropy threshold for all layers, or a per-layer list."""
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model's pooler weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        """Run all layers; at inference time, raise HighwayException to exit early
        when a highway classifier's prediction entropy drops below the layer's
        threshold.
        """
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: abort the forward pass via an exception,
                    # caught by DeeBertForSequenceClassification.forward.
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune heads of the model; heads_to_prune maps layer index -> head indices."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    """Carries an early-exit result out of the encoder loop as control flow."""

    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A per-layer early-exit head: pooler + dropout + linear classifier."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n"
    "    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An early exit fired inside the encoder; its payload replaces outputs.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
707
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wrapper over token-id sequences for language-model distillation.

    ``params`` is the run configuration (reads: ``max_model_input_size``,
    ``mlm``, ``special_tok_ids``, ``is_master``); ``data`` is a list of
    token-id sequences, each starting with CLS/BOS and ending with SEP/EOS.
    On construction the data is sanity-checked, long sequences are split,
    short and unknown-heavy sequences are dropped.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks: lengths array must mirror the token arrays."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than max_model_input_size into valid chunks,
        re-adding the special boundary tokens to each chunk.
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room to re-insert the boundary tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences (<=11 tokens) are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences where >=50% of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log corpus statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate: pad a batch of (token_ids, length) pairs into tensors.

        Returns (token_ids (bs, max_seq_len_), lengths (bs)).
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
69
0
"""Convert Suno's original Bark checkpoints to the Hugging Face Bark format.

Downloads the requested sub-model checkpoint ("text"/semantic, "coarse" or
"fine") if needed, loads it into the corresponding HF Bark module, sanity-checks
the HF output against the original implementation, and saves the converted model.
"""
import argparse
import os
from pathlib import Path

import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)  # fixed seed so the conversion sanity check is reproducible


# Mapping from Suno (nanoGPT-style) parameter-name fragments to HF Bark names.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub locations of the original Suno checkpoints, keyed by sub-model (+ "_small").
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the original checkpoint for ``model_type``."""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    """Download one original Suno checkpoint file into the local cache dir."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load an original Suno checkpoint into the matching HF Bark sub-model.

    Downloads the checkpoint if it is not cached, remaps parameter names via
    ``new_layer_name_dict``, validates the key sets, and returns the HF model
    in eval mode on ``device``.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: older checkpoints use a single "vocab_size" field
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile wrapper prefix and remap names
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    # strict=False because the ".attn.bias" buffers are intentionally absent
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model


def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model, verify it against the original, and save it."""
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble the three converted sub-models plus Encodec into one BarkModel and save/push it."""
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
708
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, 
is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
69
0
"""Benchmark ONNX Runtime inference latency of a BERT-style model with the
TensorRT execution provider (falling back to CUDA)."""
import os
import time

import numpy as np
import onnxruntime as ort

# TensorRT execution-provider toggles.
# NOTE(review): the original assignment targets were mangled; these env-var
# names are restored from the upstream quantization example — confirm.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
# Disable graph optimizations so TensorRT sees the raw graph.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# Fixed benchmark shape: one sequence of 128 tokens, all-ones dummy inputs.
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

# One untimed run to trigger engine build / warm-up.
print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):  # "_": loop index unused (was shadowing builtin `iter`)
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
709
"""Fast (tokenizers-backed) tokenizer for RemBERT."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    # Slow tokenizer unavailable without sentencepiece.
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """RemBERT fast tokenizer, backed by HuggingFace *tokenizers*.

    Sequences are formatted as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Slow-tokenizer conversion needs the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Build model inputs by adding [CLS]/[SEP] around one or two sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Create token type IDs: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece model into ``save_directory``; returns the path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
69
0
"""CLIP-style image processor: resize → center-crop → rescale → normalize."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


# NOTE(review): the original class name was mangled; restored as the CLIP
# image processor based on the OPENAI_CLIP_MEAN/STD defaults — confirm.
class CLIPImageProcessor(BaseImageProcessor):
    """Constructs an image processor with CLIP's preprocessing pipeline.

    Defaults: resize shortest edge to 224 (bicubic), center-crop to 224x224,
    rescale by 1/255, normalize with the OpenAI CLIP mean/std, convert to RGB.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Per-channel normalization: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the full pipeline to one image or a batch; returns a ``BatchFeature``.

        Per-call arguments override the instance defaults set in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
710
"""Dataset input stream that builds a dataset from a Python generator function."""
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """Reads a dataset produced by a user-supplied ``generator`` callable."""

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Builder that materializes examples from the generator callable.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Return the dataset: streaming (iterable) or fully prepared (map-style)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
69
0
"""Tests for ``datasets.utils.readme.ReadMe``: validation of dataset-card
structure against an expected YAML section layout."""
import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():

# Expected section structure for a dataset card README.
example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X"  # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
"""
)


CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}


README_CORRECT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

# Same card with a 4th-level heading, which the parser keeps as a subsection.
README_CORRECT_FOUR_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

README_EMPTY_YAML = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)

README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)

README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."

README_MISSING_TEXT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."

README_NONE_SUBSECTION = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
"""

EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."

README_MISSING_SUBSECTION = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."

README_MISSING_CONTENT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""

EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."

README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---

## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."

README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."

README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."

README_EMPTY = ""

EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."

README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    # validate() should raise with the exact aggregated error message
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    # parsing errors are raised immediately by from_string
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    # suppress_parsing_errors=True must swallow the duplicate-heading error
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
# NOTE(review): the four pytest functions below exercise `ReadMe.from_readme` on files
# written into a temp directory.  The identifiers look machine-mangled: every function
# is named `lowerCAmelCase__` (later defs shadow earlier ones), `_snake_case` stands in
# for several distinct values (tmp_dir, the README path, the markdown text, the expected
# error/exception), and `out`/`path`/`readme` are read without ever being bound here.
# Verify each reference against the upstream `datasets` test module before relying on it.
@pytest.mark.parametrize(
    'readme_md, expected_dict',
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> Any:
    """A structurally correct README parsed from disk matches the expected dict."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Write the markdown under test to <tmp_dir>/README.md.
        lowerCAmelCase__ : Any = Path(_snake_case) / 'README.md'
        with open(_snake_case, 'w+') as readme_file:
            readme_file.write(_snake_case)
        # Parse the file and convert to a plain dict for comparison.
        lowerCAmelCase__ : Optional[Any] = ReadMe.from_readme(_snake_case, _snake_case).to_dict()
        # NOTE(review): `out` and `path` are unbound in this scope -- presumably the
        # results of the two assignments above; confirm against the original test.
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    'readme_md, expected_error',
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> Any:
    """Structurally invalid READMEs raise the documented error from `validate()`."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowerCAmelCase__ : Optional[Any] = Path(_snake_case) / 'README.md'
        with open(_snake_case, 'w+') as readme_file:
            readme_file.write(_snake_case)
        # Substitute the on-disk path into the expected error message template.
        lowerCAmelCase__ : int = expected_error.format(path=_snake_case)
        with pytest.raises(_snake_case, match=re.escape(_snake_case)):
            lowerCAmelCase__ : Union[str, Any] = ReadMe.from_readme(_snake_case, _snake_case)
            readme.validate()


@pytest.mark.parametrize(
    'readme_md, expected_error',
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> List[Any]:
    """Parsing errors (duplicate headings) are raised already in `from_readme`."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowerCAmelCase__ : int = Path(_snake_case) / 'README.md'
        with open(_snake_case, 'w+') as readme_file:
            readme_file.write(_snake_case)
        lowerCAmelCase__ : Optional[int] = expected_error.format(path=_snake_case)
        with pytest.raises(_snake_case, match=re.escape(_snake_case)):
            ReadMe.from_readme(_snake_case, _snake_case)


@pytest.mark.parametrize(
    'readme_md,',
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_) -> Optional[Any]:
    """With `suppress_parsing_errors` the same README loads without raising."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowerCAmelCase__ : List[str] = Path(_snake_case) / 'README.md'
        with open(_snake_case, 'w+') as readme_file:
            readme_file.write(_snake_case)
        ReadMe.from_readme(_snake_case, _snake_case, suppress_parsing_errors=_snake_case)
711
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


# Module logger.  The original bound it to the mangled name `lowerCamelCase__`
# while the methods below read `logger`, which was a NameError at call time.
logger = logging.get_logger(__name__)


class A__(SequenceFeatureExtractor):
    """Mel-spectrogram feature extractor for TVLT-style audio models.

    Converts raw mono waveforms into normalized log-mel spectrograms, truncates
    them to ``spectrogram_length`` frames, pads each batch to a common patch
    count, and returns ``audio_values`` plus an ``audio_mask`` marking which
    patches contain real (non-padded) audio.

    The original block had duplicate ``a`` parameter names (a SyntaxError), an
    undefined base class placeholder, and lost assignment targets; the names
    below are reconstructed from the attributes the method bodies actually read.
    """

    model_input_names = ['audio_values', 'audio_mask']

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        """Configure spectrogram geometry and precompute the mel filter bank.

        Args:
            spectrogram_length: Maximum number of time frames kept per clip.
            num_channels: Number of audio channels (mono = 1).
            patch_size: (time, frequency) size of one audio patch.
            feature_size: Number of mel bins (frequency dimension).
            sampling_rate: Expected input sampling rate in Hz.
            hop_length_to_sampling_rate: Ratio defining the STFT hop length.
            n_fft: FFT window size.
            padding_value: Fill value for padded time steps.
        """
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of frequency patches per time step.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        # Slaney-style mel filter bank, stored transposed for `spectrogram()`.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=2_2_0_5_0.0,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        """Return a normalized log-mel spectrogram (time x mel bins) of `waveform`."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, 'hann'),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='dB',
            db_range=8_0.0,
        )
        # Drop the last frame, then rescale the [-80, 0] dB range into [-1, 1].
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 2_0.0
        log_spec = np.clip(log_spec / 4_0.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms.

        Args:
            raw_speech: Mono waveform(s) as numpy arrays or (nested) float lists.
            return_tensors: Framework of the returned tensors (e.g. ``'pt'``).
            return_attention_mask: Whether to also return ``audio_mask``.
            sampling_rate: Sampling rate of `raw_speech`; must match the
                extractor's configured rate when given.

        Returns:
            BatchFeature with ``audio_values`` (batch, 1, time, mel) and,
            optionally, ``audio_mask`` over patches.

        Raises:
            ValueError: On a sampling-rate mismatch or multi-channel input.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padding, per clip.
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            # Copy the real frames over the padding fill.
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
69
0
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase_ ) class A__ ( UpperCamelCase_ ): lowercase = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} ) lowercase = Features({'audio': Audio()} ) lowercase = Features({'transcription': Value('string' )} ) lowercase = 'audio' lowercase = 'transcription' def _lowerCamelCase ( self : Tuple , a : List[str] ): '''simple docstring''' if self.audio_column not in features: raise ValueError(f'''Column {self.audio_column} is not present in features.''' ) if not isinstance(features[self.audio_column] , UpperCamelCase__ ): raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' ) lowerCAmelCase__ : Optional[Any] = copy.deepcopy(self ) lowerCAmelCase__ : Any = self.input_schema.copy() lowerCAmelCase__ : List[str] = features[self.audio_column] lowerCAmelCase__ : str = input_schema return task_template @property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return {self.audio_column: "audio", self.transcription_column: "transcription"}
712
import unittest from transformers import DonutProcessor lowerCamelCase__ = """naver-clova-ix/donut-base""" class A__ ( unittest.TestCase ): def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : List[str] = DonutProcessor.from_pretrained(a ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Dict = { 'name': 'John Doe', 'age': '99', 'city': 'Atlanta', 'state': 'GA', 'zip': '30301', 'phone': '123-4567', 'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}], } lowerCAmelCase__ : Union[str, Any] = ( '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>' '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>' '<s_nicknames><s_nickname>Johnny</s_nickname>' '<sep/><s_nickname>JD</s_nickname></s_nicknames>' ) lowerCAmelCase__ : Optional[Any] = self.processor.tokenajson(a ) self.assertDictEqual(a , a )
69
0
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel

if is_vision_available():
    from transformers import MaskaFormerImageProcessor

if is_vision_available():
    from PIL import Image


# NOTE(review): this whole module is machine-mangled: every class is named `A__`, every
# method `_lowerCamelCase`, assignment targets were collapsed to `lowerCAmelCase__`, and
# call arguments to the placeholder `_SCREAMING_SNAKE_CASE` (undefined here).  The later
# reference to `MaskaFormerModelTester` suggests this first class originally carried that
# name; confirm against the upstream Mask2Former test file before executing.
class A__:
    """Builds tiny Mask2Former configs and random inputs for the tests below."""

    def __init__(self : Optional[Any], a : Any, a : int = 2, a : Dict = True, a : List[str] = False, a : str = 10, a : Optional[int] = 3, a : Union[str, Any] = 32 * 8, a : List[Any] = 32 * 8, a : int = 4, a : Any = 64, ):
        # NOTE(review): duplicate parameter names `a` are a SyntaxError as written.
        lowerCAmelCase__ : int = parent
        lowerCAmelCase__ : Dict = batch_size
        lowerCAmelCase__ : int = is_training
        lowerCAmelCase__ : Dict = use_auxiliary_loss
        lowerCAmelCase__ : str = num_queries
        lowerCAmelCase__ : Union[str, Any] = num_channels
        lowerCAmelCase__ : Tuple = min_size
        lowerCAmelCase__ : int = max_size
        lowerCAmelCase__ : Optional[int] = num_labels
        lowerCAmelCase__ : Any = hidden_dim
        lowerCAmelCase__ : Tuple = hidden_dim

    def _lowerCamelCase(self : Dict):
        """Create a config plus random pixel values, masks and labels."""
        lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            _SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : Any = torch.ones([self.batch_size, self.min_size, self.max_size], device=_SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=_SCREAMING_SNAKE_CASE) > 0.5
        ).float()
        lowerCAmelCase__ : List[Any] = (torch.rand((self.batch_size, self.num_labels), device=_SCREAMING_SNAKE_CASE) > 0.5).long()
        lowerCAmelCase__ : List[Any] = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def _lowerCamelCase(self : int):
        """Build a deliberately tiny MaskaFormerConfig."""
        lowerCAmelCase__ : Tuple = MaskaFormerConfig(
            hidden_size=self.hidden_dim, )
        lowerCAmelCase__ : Optional[Any] = self.num_queries
        lowerCAmelCase__ : Any = self.num_labels
        lowerCAmelCase__ : int = [1, 1, 1, 1]
        lowerCAmelCase__ : Dict = self.num_channels
        lowerCAmelCase__ : Dict = 64
        lowerCAmelCase__ : int = 128
        lowerCAmelCase__ : Optional[Any] = self.hidden_dim
        lowerCAmelCase__ : Tuple = self.hidden_dim
        lowerCAmelCase__ : str = self.hidden_dim
        return config

    def _lowerCamelCase(self : str):
        """Return (config, inputs_dict) for the common-test mixin."""
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
        lowerCAmelCase__ : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def _lowerCamelCase(self : Optional[int], a : Optional[int], a : List[Any]):
        """Check hidden-state tuple lengths against the config."""
        lowerCAmelCase__ : Union[str, Any] = output.encoder_hidden_states
        lowerCAmelCase__ : List[Any] = output.pixel_decoder_hidden_states
        lowerCAmelCase__ : Optional[int] = output.transformer_decoder_hidden_states
        # NOTE(review): `assertTrue` with two positional args looks like a mangled
        # `assertEqual` -- the second argument would only be `msg`.
        self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE), len(config.backbone_config.depths))
        self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE), len(config.backbone_config.depths))
        self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE), config.decoder_layers)

    def _lowerCamelCase(self : Dict, a : Union[str, Any], a : List[Any], a : int, a : int = False):
        """Run the base model forward and verify output shapes / presence."""
        with torch.no_grad():
            lowerCAmelCase__ : List[str] = MaskaFormerModel(config=_SCREAMING_SNAKE_CASE)
            model.to(_SCREAMING_SNAKE_CASE)
            model.eval()
            lowerCAmelCase__ : List[str] = model(pixel_values=_SCREAMING_SNAKE_CASE, pixel_mask=_SCREAMING_SNAKE_CASE)
            lowerCAmelCase__ : str = model(_SCREAMING_SNAKE_CASE, output_hidden_states=_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)

    def _lowerCamelCase(self : Optional[Any], a : int, a : Optional[Any], a : Union[str, Any], a : str, a : str):
        """Run the segmentation head with and without labels and verify outputs."""
        lowerCAmelCase__ : int = MaskaFormerForUniversalSegmentation(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()

        def comm_check_on_output(a : str):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            lowerCAmelCase__ : str = model(pixel_values=_SCREAMING_SNAKE_CASE, pixel_mask=_SCREAMING_SNAKE_CASE)
            lowerCAmelCase__ : Tuple = model(_SCREAMING_SNAKE_CASE)
        comm_check_on_output(_SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : Dict = model(
            pixel_values=_SCREAMING_SNAKE_CASE, pixel_mask=_SCREAMING_SNAKE_CASE, mask_labels=_SCREAMING_SNAKE_CASE, class_labels=_SCREAMING_SNAKE_CASE)
        comm_check_on_output(_SCREAMING_SNAKE_CASE)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class A__(__magic_name__, __magic_name__, unittest.TestCase):
    """Common model tests for Mask2Former (base model + universal-segmentation head).

    NOTE(review): the bases were mangled to `__magic_name__` (likely ModelTesterMixin
    and PipelineTesterMixin, both imported above), and the six `lowercase = ...` class
    attributes shadow one another -- originally distinct names such as
    `all_model_classes` and `pipeline_model_mapping`.
    """

    lowercase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    lowercase = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    def _lowerCamelCase(self : Dict):
        # Set up the shared tester and config tester.
        lowerCAmelCase__ : Dict = MaskaFormerModelTester(self)
        lowerCAmelCase__ : List[Any] = ConfigTester(self, config_class=_SCREAMING_SNAKE_CASE, has_text_modality=_SCREAMING_SNAKE_CASE)

    def _lowerCamelCase(self : Union[str, Any]):
        """Run the generic config sanity tests."""
        self.config_tester.run_common_tests()

    def _lowerCamelCase(self : Union[str, Any]):
        """Base-model forward pass with hidden states."""
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE, output_hidden_states=_SCREAMING_SNAKE_CASE)

    def _lowerCamelCase(self : List[Any]):
        """Segmentation-head forward pass."""
        lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_SCREAMING_SNAKE_CASE)

    @unittest.skip(reason='Mask2Former does not use inputs_embeds')
    def _lowerCamelCase(self : Any):
        pass

    @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
    def _lowerCamelCase(self : Optional[Any]):
        pass

    @unittest.skip(reason='Mask2Former is not a generative model')
    def _lowerCamelCase(self : Tuple):
        pass

    @unittest.skip(reason='Mask2Former does not use token embeddings')
    def _lowerCamelCase(self : str):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def _lowerCamelCase(self : List[str]):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def _lowerCamelCase(self : Any):
        pass

    def _lowerCamelCase(self : Dict):
        """Forward signature starts with `pixel_values`."""
        lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Any = model_class(_SCREAMING_SNAKE_CASE)
            lowerCAmelCase__ : Union[str, Any] = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : int = [*signature.parameters.keys()]
            lowerCAmelCase__ : Union[str, Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1], _SCREAMING_SNAKE_CASE)

    @slow
    def _lowerCamelCase(self : Optional[int]):
        """Pretrained checkpoint loads."""
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            lowerCAmelCase__ : List[str] = MaskaFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE)

    def _lowerCamelCase(self : Any):
        """Loss is produced from float mask labels."""
        lowerCAmelCase__ : Optional[Any] = (self.model_tester.min_size,) * 2
        lowerCAmelCase__ : List[str] = {
            'pixel_values': torch.randn((2, 3, *size), device=_SCREAMING_SNAKE_CASE),
            'mask_labels': torch.randn((2, 10, *size), device=_SCREAMING_SNAKE_CASE),
            'class_labels': torch.zeros(2, 10, device=_SCREAMING_SNAKE_CASE).long(),
        }
        lowerCAmelCase__ : Optional[Any] = self.model_tester.get_config()
        lowerCAmelCase__ : Dict = MaskaFormerForUniversalSegmentation(_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : Any = model(**_SCREAMING_SNAKE_CASE)
        self.assertTrue(outputs.loss is not None)

    def _lowerCamelCase(self : List[str]):
        """Base-model forward pass with hidden states (duplicate of the earlier check)."""
        lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE, output_hidden_states=_SCREAMING_SNAKE_CASE)

    def _lowerCamelCase(self : Optional[Any]):
        """Attentions are returned when requested."""
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE)
            lowerCAmelCase__ : Any = model(**_SCREAMING_SNAKE_CASE, output_attentions=_SCREAMING_SNAKE_CASE)
            self.assertTrue(outputs.attentions is not None)

    def _lowerCamelCase(self : Dict):
        """Training loss backpropagates."""
        if not self.model_tester.is_training:
            return
        lowerCAmelCase__ : List[Any] = self.all_model_classes[1]
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase__ : Tuple = model_class(_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.train()
        lowerCAmelCase__ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE, mask_labels=_SCREAMING_SNAKE_CASE, class_labels=_SCREAMING_SNAKE_CASE).loss
        loss.backward()

    def _lowerCamelCase(self : Optional[int]):
        """Gradients reach every retained intermediate activation."""
        lowerCAmelCase__ : str = self.all_model_classes[1]
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase__ : Optional[int] = True
        lowerCAmelCase__ : int = True
        lowerCAmelCase__ : List[str] = model_class(_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE)
        model.train()
        lowerCAmelCase__ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE, mask_labels=_SCREAMING_SNAKE_CASE, class_labels=_SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : Optional[int] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase__ : int = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        lowerCAmelCase__ : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase__ : str = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=_SCREAMING_SNAKE_CASE)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


# Absolute tolerance used by the integration tests below (originally bound to a
# mangled name; the tests pass it via the `_SCREAMING_SNAKE_CASE` placeholder).
lowerCamelCase__ = 1E-4


def lowerCAmelCase__() -> Dict:
    """Load the standard COCO test fixture image."""
    lowerCAmelCase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_vision
@slow
class A__(unittest.TestCase):
    """Slow integration tests against the small COCO-instance checkpoint."""

    @cached_property
    def _lowerCamelCase(self : List[Any]):
        # Checkpoint name (read elsewhere as `self.model_checkpoints`).
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def _lowerCamelCase(self : List[str]):
        # Image processor for the checkpoint (read elsewhere as `self.default_image_processor`).
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def _lowerCamelCase(self : Optional[Any]):
        """Base model: compare hidden-state slices against golden values."""
        lowerCAmelCase__ : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(_SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : str = self.default_image_processor
        lowerCAmelCase__ : List[Any] = prepare_img()
        lowerCAmelCase__ : Tuple = image_processor(_SCREAMING_SNAKE_CASE, return_tensors='pt').to(_SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : Optional[int] = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_SCREAMING_SNAKE_CASE, (1, 3, 384, 384))
        with torch.no_grad():
            lowerCAmelCase__ : Any = model(**_SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : Any = torch.tensor(
            [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]]).to(_SCREAMING_SNAKE_CASE)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], _SCREAMING_SNAKE_CASE, atol=_SCREAMING_SNAKE_CASE))
        lowerCAmelCase__ : Any = torch.tensor(
            [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]]).to(_SCREAMING_SNAKE_CASE)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], _SCREAMING_SNAKE_CASE, atol=_SCREAMING_SNAKE_CASE))
        lowerCAmelCase__ : Dict = torch.tensor(
            [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]]).to(_SCREAMING_SNAKE_CASE)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], _SCREAMING_SNAKE_CASE, atol=_SCREAMING_SNAKE_CASE))

    def _lowerCamelCase(self : Union[str, Any]):
        """Segmentation head: compare mask/class logit slices against golden values."""
        lowerCAmelCase__ : Dict = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(_SCREAMING_SNAKE_CASE).eval()
        lowerCAmelCase__ : Tuple = self.default_image_processor
        lowerCAmelCase__ : Optional[Any] = prepare_img()
        lowerCAmelCase__ : Optional[int] = image_processor(_SCREAMING_SNAKE_CASE, return_tensors='pt').to(_SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : str = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_SCREAMING_SNAKE_CASE, (1, 3, 384, 384))
        with torch.no_grad():
            lowerCAmelCase__ : Dict = model(**_SCREAMING_SNAKE_CASE)
        # masks_queries_logits
        lowerCAmelCase__ : Dict = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        lowerCAmelCase__ : List[Any] = [
            [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
            [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
            [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
        ]
        lowerCAmelCase__ : Any = torch.tensor(_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], _SCREAMING_SNAKE_CASE, atol=_SCREAMING_SNAKE_CASE))
        # class_queries_logits
        lowerCAmelCase__ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        lowerCAmelCase__ : Union[str, Any] = torch.tensor(
            [
                [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
                [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
                [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
            ]).to(_SCREAMING_SNAKE_CASE)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], _SCREAMING_SNAKE_CASE, atol=_SCREAMING_SNAKE_CASE))

    def _lowerCamelCase(self : Tuple):
        """Segmentation head with segmentation maps: loss is produced."""
        lowerCAmelCase__ : Dict = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(_SCREAMING_SNAKE_CASE).eval()
        lowerCAmelCase__ : Any = self.default_image_processor
        # NOTE(review): `np.floataa` does not exist -- a mangled `np.float32`, presumably.
        lowerCAmelCase__ : Optional[Any] = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)],
            return_tensors='pt', )
        lowerCAmelCase__ : str = inputs['pixel_values'].to(_SCREAMING_SNAKE_CASE)
        lowerCAmelCase__ : str = [el.to(_SCREAMING_SNAKE_CASE) for el in inputs['mask_labels']]
        lowerCAmelCase__ : int = [el.to(_SCREAMING_SNAKE_CASE) for el in inputs['class_labels']]
        with torch.no_grad():
            lowerCAmelCase__ : str = model(**_SCREAMING_SNAKE_CASE)
        self.assertTrue(outputs.loss is not None)
713
from numpy import exp, pi, sqrt def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 1.0 ) -> int: return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
69
0
lowerCamelCase__ = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCamelCase__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCamelCase__ = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
714
import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class A__ ( __magic_name__ , unittest.TestCase ): lowercase = XLMTokenizer lowercase = False def _lowerCamelCase ( self : int ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase__ : List[str] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] lowerCAmelCase__ : Any = dict(zip(a , range(len(a ) ) ) ) lowerCAmelCase__ : Optional[int] = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] lowerCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' ) as fp: fp.write(json.dumps(a ) ) with open(self.merges_file , 'w' ) as fp: fp.write('\n'.join(a ) ) def _lowerCamelCase ( self : List[str] , a : Dict ): '''simple docstring''' lowerCAmelCase__ : List[Any] = 'lower newer' lowerCAmelCase__ : Any = 'lower newer' return input_text, output_text def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Tuple = XLMTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase__ : Optional[int] = 'lower' lowerCAmelCase__ : Optional[Any] = ['low', 'er</w>'] lowerCAmelCase__ : Dict = tokenizer.tokenize(a ) self.assertListEqual(a , a ) lowerCAmelCase__ : Tuple = tokens + ['<unk>'] lowerCAmelCase__ : Optional[int] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a ) @slow def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : List[Any] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' ) lowerCAmelCase__ : Any = tokenizer.encode('sequence builders' , 
add_special_tokens=a ) lowerCAmelCase__ : Union[str, Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=a ) lowerCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(a ) lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(a , a ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
69
0
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline lowerCamelCase__ = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False) parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""") parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""") lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = """cpu""" lowerCamelCase__ = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings""" lowerCamelCase__ = """path-to-your-trained-model""" lowerCamelCase__ = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) lowerCamelCase__ = pipe.to(device) # to channels last lowerCamelCase__ = pipe.unet.to(memory_format=torch.channels_last) lowerCamelCase__ = pipe.vae.to(memory_format=torch.channels_last) lowerCamelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: lowerCamelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex lowerCamelCase__ = torch.randn(2, 4, 64, 64) lowerCamelCase__ = torch.rand(1) * 999 lowerCamelCase__ = torch.randn(2, 77, 768) lowerCamelCase__ = (sample, timestep, encoder_hidden_status) try: lowerCamelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: lowerCamelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) lowerCamelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) lowerCamelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: lowerCamelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute lowerCamelCase__ 
= 666 lowerCamelCase__ = torch.Generator(device).manual_seed(seed) lowerCamelCase__ = {"""generator""": generator} if args.steps is not None: lowerCamelCase__ = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): lowerCamelCase__ = pipe(prompt, **generate_kwargs).images[0] # save image image.save("""generated.png""")
715
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[str]: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = image.size lowerCAmelCase__ , lowerCAmelCase__ : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase__ : Any = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) lowerCAmelCase__ : int = np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa ) / 255.0 lowerCAmelCase__ : Optional[int] = image[None].transpose(0 , 3 , 1 , 2 ) lowerCAmelCase__ : List[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE_ ) return 2.0 * image - 1.0 class A__ ( __magic_name__ ): def __init__( self : List[str] , a : VQModel , a : UNetaDModel , a : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): '''simple docstring''' super().__init__() self.register_modules(vqvae=a , unet=a , scheduler=a ) @torch.no_grad() def __call__( self : int , a : Union[torch.Tensor, PIL.Image.Image] = None , a : Optional[int] = 1 , a : Optional[int] = 100 , a : Optional[float] = 0.0 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ): '''simple docstring''' if isinstance(a , PIL.Image.Image ): lowerCAmelCase__ : str = 1 elif isinstance(a , torch.Tensor ): lowerCAmelCase__ : Union[str, Any] = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(a )}''' ) if isinstance(a , 
PIL.Image.Image ): lowerCAmelCase__ : List[Any] = preprocess(a ) lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image lowerCAmelCase__ : Optional[int] = (batch_size, self.unet.config.in_channels // 2, height, width) lowerCAmelCase__ : Optional[Any] = next(self.unet.parameters() ).dtype lowerCAmelCase__ : List[str] = randn_tensor(a , generator=a , device=self.device , dtype=a ) lowerCAmelCase__ : Any = image.to(device=self.device , dtype=a ) # set timesteps and move to the correct device self.scheduler.set_timesteps(a , device=self.device ) lowerCAmelCase__ : Optional[Any] = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler lowerCAmelCase__ : Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCAmelCase__ : Union[str, Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCAmelCase__ : List[str] = {} if accepts_eta: lowerCAmelCase__ : List[Any] = eta for t in self.progress_bar(a ): # concat latents and low resolution image in the channel dimension. 
lowerCAmelCase__ : Union[str, Any] = torch.cat([latents, image] , dim=1 ) lowerCAmelCase__ : Dict = self.scheduler.scale_model_input(a , a ) # predict the noise residual lowerCAmelCase__ : Tuple = self.unet(a , a ).sample # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase__ : List[str] = self.scheduler.step(a , a , a , **a ).prev_sample # decode the image latents with the VQVAE lowerCAmelCase__ : Dict = self.vqvae.decode(a ).sample lowerCAmelCase__ : Tuple = torch.clamp(a , -1.0 , 1.0 ) lowerCAmelCase__ : Tuple = image / 2 + 0.5 lowerCAmelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase__ : int = self.numpy_to_pil(a ) if not return_dict: return (image,) return ImagePipelineOutput(images=a )
69
0
'''simple docstring''' from math import factorial def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 100 ) -> int: return sum(int(_lowerCAmelCase ) for x in str(factorial(_lowerCAmelCase ) ) ) if __name__ == "__main__": print(solution(int(input("""Enter the Number: """).strip())))
716
import os from collections import deque import torch from torch.utils.data import Dataset class A__ ( __magic_name__ ): def __init__( self : Union[str, Any] , a : str="" , a : str="train" ): '''simple docstring''' assert os.path.isdir(a ) lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : Dict = os.listdir(a ) for story_filename in story_filenames_list: if "summary" in story_filename: continue lowerCAmelCase__ : Union[str, Any] = os.path.join(a , a ) if not os.path.isfile(a ): continue self.documents.append(a ) def __len__( self : Any ): '''simple docstring''' return len(self.documents ) def __getitem__( self : Dict , a : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.documents[idx] lowerCAmelCase__ : Union[str, Any] = document_path.split('/' )[-1] with open(a , encoding='utf-8' ) as source: lowerCAmelCase__ : List[Any] = source.read() lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = process_story(a ) return document_name, story_lines, summary_lines def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Tuple: lowerCAmelCase__ : Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE_ : len(SCREAMING_SNAKE_CASE_ ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) ) # for some unknown reason some lines miss a period, add it lowerCAmelCase__ : List[Any] = [_add_missing_period(SCREAMING_SNAKE_CASE_ ) for line in nonempty_lines] # gather article lines lowerCAmelCase__ : int = [] lowerCAmelCase__ : Any = deque(SCREAMING_SNAKE_CASE_ ) while True: try: lowerCAmelCase__ : int = lines.popleft() if element.startswith('@highlight' ): break story_lines.append(SCREAMING_SNAKE_CASE_ ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. 
return story_lines, [] # gather summary lines lowerCAmelCase__ : Tuple = list(filter(lambda SCREAMING_SNAKE_CASE_ : not t.startswith('@highlight' ) , SCREAMING_SNAKE_CASE_ ) ) return story_lines, summary_lines def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any: lowerCAmelCase__ : int = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')'] if line.startswith('@highlight' ): return line if line[-1] in END_TOKENS: return line return line + "." def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: if len(SCREAMING_SNAKE_CASE_ ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(SCREAMING_SNAKE_CASE_ )) ) return sequence def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: lowerCAmelCase__ : str = torch.ones_like(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : int = sequence == pad_token_id lowerCAmelCase__ : Optional[int] = 0 return mask def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: lowerCAmelCase__ : Any = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in story_lines] lowerCAmelCase__ : str = [token for sentence in story_lines_token_ids for token in sentence] lowerCAmelCase__ : Dict = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in summary_lines] lowerCAmelCase__ : str = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = [] for sequence in batch: lowerCAmelCase__ : Union[str, Any] = -1 lowerCAmelCase__ : int = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(SCREAMING_SNAKE_CASE_ ) return torch.tensor(SCREAMING_SNAKE_CASE_ )
69
0
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class A__ ( UpperCamelCase__ ): lowercase = 42 class A__ ( UpperCamelCase__ , UpperCamelCase__ ): @register_to_config def __init__( self : Optional[Any] , a : List[str] = 16 , a : Optional[Any] = 88 , a : Optional[int] = None , a : str = None , a : List[str] = 1 , a : Optional[int] = 0.0 , a : Optional[int] = 32 , a : List[Any] = None , a : Optional[Any] = False , a : int = None , a : Tuple = "geglu" , a : Optional[Any] = True , a : Union[str, Any] = True , ): '''simple docstring''' super().__init__() lowerCAmelCase__ : int = num_attention_heads lowerCAmelCase__ : str = attention_head_dim lowerCAmelCase__ : Optional[Any] = num_attention_heads * attention_head_dim lowerCAmelCase__ : Union[str, Any] = in_channels lowerCAmelCase__ : Optional[int] = torch.nn.GroupNorm(num_groups=a , num_channels=a , eps=1E-6 , affine=a ) lowerCAmelCase__ : int = nn.Linear(a , a ) # 3. 
Define transformers blocks lowerCAmelCase__ : List[str] = nn.ModuleList( [ BasicTransformerBlock( a , a , a , dropout=a , cross_attention_dim=a , activation_fn=a , attention_bias=a , double_self_attention=a , norm_elementwise_affine=a , ) for d in range(a ) ] ) lowerCAmelCase__ : Union[str, Any] = nn.Linear(a , a ) def _lowerCamelCase ( self : Optional[Any] , a : Any , a : List[str]=None , a : Optional[Any]=None , a : str=None , a : Tuple=1 , a : Tuple=None , a : int = True , ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = hidden_states.shape lowerCAmelCase__ : str = batch_frames // num_frames lowerCAmelCase__ : Optional[Any] = hidden_states lowerCAmelCase__ : Union[str, Any] = hidden_states[None, :].reshape(a , a , a , a , a ) lowerCAmelCase__ : Any = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) lowerCAmelCase__ : Optional[int] = self.norm(a ) lowerCAmelCase__ : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , a , a ) lowerCAmelCase__ : Dict = self.proj_in(a ) # 2. Blocks for block in self.transformer_blocks: lowerCAmelCase__ : Dict = block( a , encoder_hidden_states=a , timestep=a , cross_attention_kwargs=a , class_labels=a , ) # 3. Output lowerCAmelCase__ : Union[str, Any] = self.proj_out(a ) lowerCAmelCase__ : Any = ( hidden_states[None, None, :] .reshape(a , a , a , a , a ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) lowerCAmelCase__ : List[str] = hidden_states.reshape(a , a , a , a ) lowerCAmelCase__ : int = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=a )
717
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCamelCase__ = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. lowerCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowerCamelCase__ = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") lowerCamelCase__ = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[str]: lowerCAmelCase__ : int = None # source code of `config_class` lowerCAmelCase__ : Optional[int] = inspect.getsource(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Dict = _re_checkpoint.findall(SCREAMING_SNAKE_CASE_ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('/' ): lowerCAmelCase__ : Union[str, Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link lowerCAmelCase__ : Dict = F'''https://huggingface.co/{ckpt_name}''' if ckpt_link == ckpt_link_from_name: lowerCAmelCase__ : str = ckpt_name break return checkpoint def lowerCAmelCase__ ( ) -> int: lowerCAmelCase__ : Union[str, Any] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue lowerCAmelCase__ : Union[str, Any] = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Optional[Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: lowerCAmelCase__ : List[str] = '\n'.join(sorted(SCREAMING_SNAKE_CASE_ ) ) raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
69
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]: if isinstance(__lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__lowerCAmelCase ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class A__ ( UpperCAmelCase__ ): lowercase = ["pixel_values"] def __init__( self : List[str] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Any , ): '''simple docstring''' super().__init__(**lowerCamelCase__ ) lowerCAmelCase__ : List[str] = size if size is not None else {"shortest_edge": 256} lowerCAmelCase__ : Any = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) lowerCAmelCase__ : Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224} lowerCAmelCase__ : Optional[Any] = get_size_dict(lowerCamelCase__ , param_name='crop_size' ) 
lowerCAmelCase__ : Optional[int] = do_resize lowerCAmelCase__ : int = size lowerCAmelCase__ : Dict = do_center_crop lowerCAmelCase__ : Optional[Any] = crop_size lowerCAmelCase__ : int = resample lowerCAmelCase__ : Optional[Any] = do_rescale lowerCAmelCase__ : str = rescale_factor lowerCAmelCase__ : Union[str, Any] = offset lowerCAmelCase__ : int = do_normalize lowerCAmelCase__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase__ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCamelCase ( self : Dict , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : Any , ): '''simple docstring''' lowerCAmelCase__ : int = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) if "shortest_edge" in size: lowerCAmelCase__ : Union[str, Any] = get_resize_output_image_size(lowerCamelCase__ , size['shortest_edge'] , default_to_square=lowerCamelCase__ ) elif "height" in size and "width" in size: lowerCAmelCase__ : Any = (size["height"], size["width"]) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(lowerCamelCase__ , size=(size['height'], size['width']) , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def _lowerCamelCase ( self : Any , a : np.ndarray , a : Union[int, float] , a : bool = True , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ): '''simple docstring''' lowerCAmelCase__ : Dict = image.astype(np.floataa ) if offset: lowerCAmelCase__ : Optional[Any] = image - (scale / 2) return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def _lowerCamelCase ( self : str , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Any , ): '''simple docstring''' return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def _lowerCamelCase ( self : List[str] , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[ChannelDimension] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) if offset and not do_rescale: raise ValueError('For offset, do_rescale must also be set to True.' 
) # All transformations expect numpy arrays. lowerCAmelCase__ : int = to_numpy_array(lowerCamelCase__ ) if do_resize: lowerCAmelCase__ : List[str] = self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) if do_center_crop: lowerCAmelCase__ : Dict = self.center_crop(lowerCamelCase__ , size=lowerCamelCase__ ) if do_rescale: lowerCAmelCase__ : List[Any] = self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ , offset=lowerCamelCase__ ) if do_normalize: lowerCAmelCase__ : Any = self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) lowerCAmelCase__ : Union[str, Any] = to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) return image def _lowerCamelCase ( self : Optional[Any] , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Union[str, Any] , ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase__ : Union[str, Any] = resample if resample is not None else self.resample lowerCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase__ : Dict = offset if offset is not None else self.offset lowerCAmelCase__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase__ : int = image_mean if image_mean is not None else self.image_mean lowerCAmelCase__ : int = image_std if image_std is 
not None else self.image_std lowerCAmelCase__ : Dict = size if size is not None else self.size lowerCAmelCase__ : Union[str, Any] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) lowerCAmelCase__ : int = crop_size if crop_size is not None else self.crop_size lowerCAmelCase__ : Optional[Any] = get_size_dict(lowerCamelCase__ , param_name='crop_size' ) if not valid_images(lowerCamelCase__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) lowerCAmelCase__ : Any = make_batched(lowerCamelCase__ ) lowerCAmelCase__ : Optional[Any] = [ [ self._preprocess_image( image=lowerCamelCase__ , do_resize=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , do_center_crop=lowerCamelCase__ , crop_size=lowerCamelCase__ , do_rescale=lowerCamelCase__ , rescale_factor=lowerCamelCase__ , offset=lowerCamelCase__ , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , data_format=lowerCamelCase__ , ) for img in video ] for video in videos ] lowerCAmelCase__ : Dict = {"pixel_values": videos} return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
718
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
69
0
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Dict: lowerCAmelCase__ : Tuple = 0 lowerCAmelCase__ : List[Any] = len(lowercase__ ) for i in range(n - 1 ): for j in range(i + 1 , lowercase__ ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: if len(lowercase__ ) <= 1: return arr, 0 lowerCAmelCase__ : List[str] = len(lowercase__ ) // 2 lowerCAmelCase__ : List[str] = arr[0:mid] lowerCAmelCase__ : List[str] = arr[mid:] lowerCAmelCase__ , lowerCAmelCase__ : str = count_inversions_recursive(lowercase__ ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = count_inversions_recursive(lowercase__ ) lowerCAmelCase__ , lowerCAmelCase__ : Dict = _count_cross_inversions(lowercase__ , lowercase__ ) lowerCAmelCase__ : List[Any] = inversion_p + inversions_q + cross_inversions return c, num_inversions def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : List[Any] = 0 while i < len(lowercase__ ) and j < len(lowercase__ ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. 
num_inversion += len(lowercase__ ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(lowercase__ ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def lowerCAmelCase__ ( ) -> int: lowerCAmelCase__ : Union[str, Any] = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) lowerCAmelCase__ : str = count_inversions_bf(lowercase__ ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = count_inversions_recursive(lowercase__ ) assert num_inversions_bf == num_inversions_recursive == 8 print('number of inversions = ' , lowercase__ ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() lowerCAmelCase__ : List[str] = count_inversions_bf(lowercase__ ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = count_inversions_recursive(lowercase__ ) assert num_inversions_bf == num_inversions_recursive == 0 print('number of inversions = ' , lowercase__ ) # an empty list should also have zero inversions lowerCAmelCase__ : Any = [] lowerCAmelCase__ : List[str] = count_inversions_bf(lowercase__ ) lowerCAmelCase__ , lowerCAmelCase__ : str = count_inversions_recursive(lowercase__ ) assert num_inversions_bf == num_inversions_recursive == 0 print('number of inversions = ' , lowercase__ ) if __name__ == "__main__": main()
719
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase__ = { """configuration_chinese_clip""": [ """CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ChineseCLIPConfig""", """ChineseCLIPOnnxConfig""", """ChineseCLIPTextConfig""", """ChineseCLIPVisionConfig""", ], """processing_chinese_clip""": ["""ChineseCLIPProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""ChineseCLIPFeatureExtractor"""] lowerCamelCase__ = ["""ChineseCLIPImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """ChineseCLIPModel""", """ChineseCLIPPreTrainedModel""", """ChineseCLIPTextModel""", """ChineseCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
69
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    """Configuration class for LLaMA models.

    Stores the hyper-parameters used to instantiate a LLaMA model; defaults
    correspond to the 7B variant. Inherits serialization/deserialization
    behavior from :class:`PretrainedConfig`.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: older checkpoints used multi-head
        # attention, i.e. one key/value head per attention head
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration.

        Raises ValueError unless `rope_scaling` is None or a two-field dict
        with a supported `type` and a float `factor` > 1.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f'got {self.rope_scaling}'
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}'
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
720
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text, n=100, character=" "):
    """Split *text* into chunks of ``n`` words separated by ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents):
    """Split each document of the batch into 100-word passages.

    :param documents: batch dict with parallel "title" and "text" lists
    :return: dict of flattened {"title": [...], "text": [...]} passages
    """
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents, ctx_encoder, ctx_tokenizer):
    """Compute the DPR embeddings of a batch of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args,
    processing_args,
    index_hnsw_args,
):
    """Build a RAG knowledge dataset from a csv file and index it with Faiss."""
    ######################################
    logger.info('Step 1 - Create the dataset')
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        'csv', data_files=[rag_example_args.csv_path], split='train', delimiter='\t', column_names=['title', 'text']
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset')
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info('Step 2 - Index the dataset')
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('embeddings', custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset_hnsw_index.faiss')
    dataset.get_index('embeddings').save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    # Input csv and model/output locations for the example run.
    csv_path: str = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv'),
        metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''},
    )
    question: Optional[str] = field(
        default=None,
        metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'},
    )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq',
        metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''},
    )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base',
        metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb'),
        metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'},
    )


@dataclass
class ProcessingArguments:
    # Parallelism and batching knobs for the dataset.map calls above.
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        },
    )


@dataclass
class IndexHnswArguments:
    # Faiss HNSW index hyper-parameters.
    d: int = field(
        default=768,
        metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'},
    )
    m: int = field(
        default=128,
        metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
69
0
import math from numpy import inf from scipy.integrate import quad def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: if num <= 0: raise ValueError('math domain error' ) return quad(_UpperCamelCase , 0 , _UpperCamelCase , args=(_UpperCamelCase) )[0] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: return math.pow(_UpperCamelCase , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
721
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    """Unit tests for DDPMParallelScheduler.

    NOTE: in the previous revision every test method was named identically,
    so each definition overwrote the last and only one test actually ran.
    Each check now has its own `test_*` name so the runner discovers all of
    them.
    """

    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        # Baseline config; individual tests override single entries via kwargs.
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1], [0.0_0_2, 0.0_2, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference variances for the fixed_small linear schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0_9_7_9)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.0_2)) < 1E-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1

        per_sample_batch = sample_a.shape[0]
        # Stack three shifted samples and step them all at once through the
        # parallel (batched) denoising path.
        samples = torch.stack([sample_a, sample_b, sample_c], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1_1_5_3.1_8_3_3) < 1E-2
        assert abs(result_mean.item() - 0.5_0_0_5) < 1E-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2
        assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2
        assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
69
0
lowerCamelCase__ = { """joule""": 1.0, """kilojoule""": 1000, """megajoule""": 100_0000, """gigajoule""": 10_0000_0000, """wattsecond""": 1.0, """watthour""": 3600, """kilowatthour""": 360_0000, """newtonmeter""": 1.0, """calorie_nutr""": 4186.8, """kilocalorie_nutr""": 418_6800.00, """electronvolt""": 1.6_0_2_1_7_6_6_3_4E-1_9, """britishthermalunit_it""": 1055.0_5585, """footpound""": 1.35_5818, } def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: lowerCAmelCase__ : List[Any] = ( F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' F'''Valid values are: {", ".join(SCREAMING_SNAKE_CASE_ )}''' ) raise ValueError(SCREAMING_SNAKE_CASE_ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
700
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and tokenizer into one processor.

    The image processor optionally runs OCR to obtain words and normalized
    bounding boxes; the tokenizer then turns words + boxes (+ optional word
    labels) into token-level model inputs, and the pixel values are attached
    to the resulting encoding.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        # `feature_extractor` is the deprecated alias of `image_processor`.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare images (and optionally text/boxes/labels) for the model.

        When the image processor runs OCR, words and boxes come from OCR and
        must not also be supplied by the caller.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.'
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.'
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features['boxes'],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            # Duplicate each image for every overflowing chunk produced from it.
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['pixel_values'] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once per overflowing token chunk it produced."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}'
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
69
0
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense for from-scratch training.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path'
            )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={'help': 'The number of processes to use for the preprocessing.'},
    )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )

    def __post_init__(self):
        # Only csv/json/txt inputs are supported by the downstream loaders.
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference data to *dataset*.

    *ref_file* must contain exactly one line per dataset example; each line is a
    JSON-encoded list describing sub-word groupings for Chinese whole word
    masking. Returns a new ``datasets.Dataset`` with the extra column.
    """
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    # NOTE(review): the obfuscated original dropped the column name here;
    # "chinese_ref" is what DataCollatorForWholeWordMask consumes -- confirm.
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    """Run masked-LM training with whole word masking; returns eval metrics."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either a hub dataset name or local CSV/JSON/TXT files.
    # In distributed training, load_dataset guarantees only one local process
    # downloads the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # Carve a validation split out of the train split.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)

    # Load pretrained config/tokenizer/model. The .from_pretrained methods
    # guarantee only one local process downloads model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines (the obfuscated original computed this filter but
        # discarded the result; assign it back so empty lines are truly dropped).
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid the ref column being removed by the
    # trainer.  NOTE(review): reconstructed target of this assignment -- confirm.
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator: this one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
701
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester(object):
    """Builds tiny SqueezeBert configs/inputs and checks each task head's output shape."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # Mirrors the constructor parameters above; note the two dropout kwargs
        # are deliberately cross-wired here exactly as in the obfuscated source.
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example once per choice: (batch, seq) -> (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the three flags below were anonymized to `lowercase` in the
    # source (values False/True/False); attribute names reconstructed from the
    # ModelTesterMixin convention -- confirm against the mixin.
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
69
0
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup followed by a wrapped decay schedule.

    While ``step < warmup_steps`` the learning rate grows as
    ``initial_learning_rate * (step / warmup_steps) ** power``; afterwards
    ``decay_schedule_fn(step - warmup_steps)`` takes over.
    """

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Create an Adam optimizer with polynomial decay and optional warmup/weight decay.

    Returns ``(optimizer, lr_schedule)`` so the LR can be tracked independently.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied before each parameter update.

    Variables matching a pattern in *exclude_from_weight_decay* are skipped
    unless they also match *include_in_weight_decay* (include wins).
    """

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        # NOTE(review): the obfuscated source assigned this constant to a dead
        # local; storing it in apply_state is what _decay_weights_op reads.
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator(object):
    """Accumulates gradients across replicas/steps until explicitly reset."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (creates the counter lazily)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients and the step counter."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
702
lowerCamelCase__ = """Alexander Joslin""" import operator as op from .stack import Stack def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int: lowerCAmelCase__ : Union[str, Any] = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub} lowerCAmelCase__ : Stack[int] = Stack() lowerCAmelCase__ : Stack[str] = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(SCREAMING_SNAKE_CASE_ ) ) elif i in operators: # RULE 2 operator_stack.push(SCREAMING_SNAKE_CASE_ ) elif i == ")": # RULE 4 lowerCAmelCase__ : List[Any] = operator_stack.peek() operator_stack.pop() lowerCAmelCase__ : List[str] = operand_stack.peek() operand_stack.pop() lowerCAmelCase__ : List[Any] = operand_stack.peek() operand_stack.pop() lowerCAmelCase__ : Tuple = operators[opr](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) operand_stack.push(SCREAMING_SNAKE_CASE_ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
69
0
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    """Tests for ``RagTokenizer`` (DPR question encoder + BART generator)."""

    def setUp(self):
        # Build tiny on-disk DPR (WordPiece) and BART (BPE) tokenizers so the
        # tests do not need to download anything.
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok: plain one-token-per-line WordPiece vocab.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok: JSON vocab + merges file (byte-level BPE).
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        # Round-trip: save config + tokenizer, reload, and check the reloaded
        # (fast) tokenizers expose the same vocabularies.
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        # Smoke test against the published rag-token-nq checkpoint.
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        # Smoke test against the published rag-sequence-nq checkpoint.
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
703
import numpy


class TwoHiddenLayerNeuralNetwork:
    """A tiny fully-connected network: input -> 4 -> 3 -> 1, sigmoid everywhere.

    Trained by plain gradient descent on the squared error; weights start
    random, so individual runs differ but shapes and the API are stable.
    """

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """
        :param input_array: training inputs, shape (n_samples, n_features)
        :param output_array: target outputs, shape (n_samples, 1)
        """
        self.input_array = input_array
        # Weight matrices: (previous layer nodes) x (next layer nodes).
        # input -> first hidden layer (4 nodes)
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # first hidden (4 nodes) -> second hidden layer (3 nodes)
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # second hidden (3 nodes) -> output layer (1 node)
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real target values.
        self.output_array = output_array
        # Network predictions; starts as zeros, updated each training step.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the stored inputs through the network and return the output."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """One gradient-descent step on all three weight matrices.

        Gradients are of the squared error 2*(target - prediction) chained
        through the sigmoid derivatives of each layer's activations.
        """
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run `iterations` feedforward/back-propagation cycles.

        :param output: target values used only for the optional loss printout
        :param iterations: number of training steps
        :param give_loss: if True, print the mean squared error each step

        Fix: the forward pass result is stored in ``self.predicted_output`` so
        back_propagation differentiates against the current prediction instead
        of the zero-initialised array.
        """
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Forward a single input through the trained network.

        :return: 1 if the network output exceeds 0.6, else 0
        """
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic activation: 1 / (1 + e^-x)."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of its output: s*(1-s)."""
    return (value) * (1 - (value))


def example() -> int:
    """Train on the 3-bit parity-like truth table and classify (1, 1, 1)."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
69
0
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a comma-separated vocab file and an emoji JSON file.

    Returns ``(vocab, raw_vocab, ids_to_tokens, emoji)`` where each vocab line
    may list several surface forms mapping to the same id.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # A line is either a single token (possibly the literal ",") or a
    # comma-separated list of tokens sharing one id.
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX-Japanese, delegating the actual subword work to
    :class:`SubWordJapaneseTokenizer`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # raw_vocab counts one entry per id (merged surface forms).
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab; unknown -> unk id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation's turns into ids, each turn followed by EOS,
        truncating on the left to ``model_max_length``."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    """Greedy longest-match subword tokenizer for Japanese with special
    handling for URLs/emails/dates/prices, emoji, box-drawing characters and
    raw-byte fallback tokens."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        # Content-normalisation patterns (URL, email, phone, dates, era dates, prices).
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        # Box-drawing and block-element characters all map to a single <BLOCK> token.
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        # Replace volatile content with placeholder tokens, then collapse
        # consecutive <BLOCK> markers.
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        # Normalise whitespace/newlines/dashes, expand emoji to their tokens.
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # True for single characters whose 2-byte UTF-8 encoding falls in
            # the symbol ranges handled by the <KIGOU> token.
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            # True for single characters in the U+2000..U+2BFF area (3-byte UTF-8).
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # Special tokens start with '<' and may be up to maxlen long;
            # ordinary text is matched at most 3 characters at a time.
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    # Fall back to raw UTF-8 byte tokens.
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
704
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ): '''simple docstring''' lowerCAmelCase__ : str = parent lowerCAmelCase__ : Union[str, Any] = batch_size lowerCAmelCase__ : List[str] = image_size lowerCAmelCase__ : Optional[Any] = patch_size lowerCAmelCase__ : Tuple = num_channels lowerCAmelCase__ : Optional[int] = embed_dim lowerCAmelCase__ : Tuple = depths lowerCAmelCase__ : List[str] = num_heads lowerCAmelCase__ : List[Any] = window_size lowerCAmelCase__ : Any = mlp_ratio lowerCAmelCase__ : Optional[Any] = qkv_bias lowerCAmelCase__ : Any = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : int = drop_path_rate lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : int = 
use_absolute_embeddings lowerCAmelCase__ : List[str] = patch_norm lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = initializer_range lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : List[Any] = scope lowerCAmelCase__ : Dict = use_labels lowerCAmelCase__ : List[Any] = type_sequence_label_size lowerCAmelCase__ : Optional[Any] = encoder_stride def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : Optional[Any] = None if self.use_labels: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : int = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self : List[str] ): '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = model(a ) lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ): '''simple docstring''' lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : str = model(a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ : List[str] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ): '''simple docstring''' lowerCAmelCase__ : str = self.type_sequence_label_size lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a ) model.to(a ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs lowerCAmelCase__ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowercase = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] 
= SwinvaModelTester(self ) lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' ) def _lowerCamelCase ( self : Any ): '''simple docstring''' pass @unittest.skip(reason='Swinv2 does not use inputs_embeds' ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Optional[int] = model_class(a ) lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Tuple = [*signature.parameters.keys()] lowerCAmelCase__ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _lowerCamelCase 
( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : str = False lowerCAmelCase__ : List[Any] = True lowerCAmelCase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Dict = outputs.attentions lowerCAmelCase__ : Dict = len(self.model_tester.depths ) self.assertEqual(len(a ) , a ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : Optional[int] = config.window_size**2 lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) lowerCAmelCase__ : Tuple = len(a ) # Check attention is always last and order is fine lowerCAmelCase__ : str = True lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) if hasattr(self.model_tester , 'num_hidden_states_types' ): lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCAmelCase__ : Any = 2 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowerCAmelCase__ : Dict = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, 
window_size_squared] , ) def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ): '''simple docstring''' lowerCAmelCase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.hidden_states lowerCAmelCase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swinv2 has a different seq_length lowerCAmelCase__ : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(a ) , a ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape lowerCAmelCase__ : List[str] = ( reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : List[str] = True 
self.check_hidden_states_output(a , a , a , a ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Any = 3 lowerCAmelCase__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase__ : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase__ : str = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a ) self.assertIsNotNone(a ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = _config_zero_init(a ) for model_class in 
self.all_model_classes: lowerCAmelCase__ : int = model_class(config=a ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class A__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to( a ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**a ) # verify the logits lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
69
0
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort ``collection`` in place with recursive insertion sort.

    Sorts the first ``n`` elements; call with ``n == len(collection)`` to
    sort the whole list.

    :param collection: mutable list of comparable items (sorted in place)
    :param n: length of the prefix to sort
    """
    # Base case: an empty or single-element prefix is already sorted.
    if len(collection) <= 1 or n <= 1:
        return
    # Insert the element at position n - 1 into its correct place among the
    # elements to its right, then recurse on the shorter prefix.
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble ``collection[index - 1]`` rightward until adjacent order holds.

    :param collection: mutable list of comparable items (modified in place)
    :param index: position compared against its left neighbour
    """
    # Stop at the end of the list, or once the adjacent pair is ordered.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swap the out-of-order adjacent pair and continue moving rightward.
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
705
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the pandigital digit tuple ``num`` has the
    substring-divisibility property of Project Euler problem 43
    (d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, d4d5d6 % 5 == 0, ...).

    :param num: tuple of single digits, ``num[i]`` being digit d(i+1)
    """
    # d2d3d4 is divisible by 2 iff its last digit d4 is even.
    if num[3] % 2 != 0:
        return False
    # d3d4d5 is divisible by 3 iff its digit sum is.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 is divisible by 5 iff its last digit d6 is 0 or 5.
    if num[5] % 5 != 0:
        return False
    # The remaining three-digit substrings must be divisible by 7, 11, 13, 17.
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all pandigital numbers over digits ``0..n-1`` with
    the substring-divisibility property (Project Euler problem 43).

    :param n: number of digits to permute (10 for the original problem)
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
69
0
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers starting from F(2): 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits
    (Project Euler problem 25; indexing F(1) = F(2) = 1).

    :param n: required number of decimal digits
    """
    # The generator starts at F(2), so `answer` counts terms from index 2
    # onward; the final `+ 1` converts the count of too-short terms into
    # the 1-based Fibonacci index of the first sufficiently long term.
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
706
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( __magic_name__ , unittest.TestCase ): lowercase = ConsistencyModelPipeline lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt lowercase = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet' , ) return unet @property def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , ) return unet def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any]=False ): '''simple docstring''' if class_cond: lowerCAmelCase__ : Tuple = self.dummy_cond_unet else: lowerCAmelCase__ : Dict = self.dummy_uncond_unet # Default to CM multistep sampler lowerCAmelCase__ : Optional[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : List[Any] = { 'unet': unet, 'scheduler': scheduler, } return components def _lowerCamelCase ( self : int , a : Optional[int] , a : Any=0 ): '''simple docstring''' if str(a ).startswith('mps' ): lowerCAmelCase__ : 
List[str] = torch.manual_seed(a ) else: lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a ) lowerCAmelCase__ : str = { 'batch_size': 1, 'num_inference_steps': None, 'timesteps': [22, 0], 'generator': generator, 'output_type': 'np', } return inputs def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : Optional[Any] = self.get_dummy_components() lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a ) lowerCAmelCase__ : Tuple = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : str = self.get_dummy_inputs(a ) lowerCAmelCase__ : str = pipe(**a ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : str = image[0, -3:, -3:, -1] lowerCAmelCase__ : Tuple = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : Tuple = self.get_dummy_components(class_cond=a ) lowerCAmelCase__ : Union[str, Any] = ConsistencyModelPipeline(**a ) lowerCAmelCase__ : Tuple = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a ) lowerCAmelCase__ : int = 0 lowerCAmelCase__ : Union[str, Any] = pipe(**a ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1] lowerCAmelCase__ : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : 
Union[str, Any] = self.get_dummy_components() lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(**a ) lowerCAmelCase__ : Dict = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(a ) lowerCAmelCase__ : Optional[Any] = 1 lowerCAmelCase__ : Dict = None lowerCAmelCase__ : List[Any] = pipe(**a ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1] lowerCAmelCase__ : Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : Optional[int] = self.get_dummy_components(class_cond=a ) lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a ) lowerCAmelCase__ : Optional[Any] = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a ) lowerCAmelCase__ : Dict = 1 lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ : str = pipe(**a ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Dict = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class A__ ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self : Optional[Any] , a : Tuple=0 , a : Optional[Any]=False , a : Optional[Any]="cpu" , a : Union[str, Any]=torch.floataa , a : Dict=(1, 3, 64, 64) ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a ) 
lowerCAmelCase__ : List[Any] = { 'num_inference_steps': None, 'timesteps': [22, 0], 'class_labels': 0, 'generator': generator, 'output_type': 'np', } if get_fixed_latents: lowerCAmelCase__ : Optional[int] = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a ) lowerCAmelCase__ : Tuple = latents return inputs def _lowerCamelCase ( self : str , a : Tuple=0 , a : Tuple="cpu" , a : Tuple=torch.floataa , a : str=(1, 3, 64, 64) ): '''simple docstring''' if type(a ) == str: lowerCAmelCase__ : str = torch.device(a ) lowerCAmelCase__ : List[str] = torch.Generator(device=a ).manual_seed(a ) lowerCAmelCase__ : Any = randn_tensor(a , generator=a , device=a , dtype=a ) return latents def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ : List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a ) pipe.to(torch_device=a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Optional[Any] = self.get_inputs() lowerCAmelCase__ : Dict = pipe(**a ).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ : Any = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : Optional[int] = ConsistencyModelPipeline(unet=a , scheduler=a ) pipe.to(torch_device=a ) 
pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : List[str] = self.get_inputs() lowerCAmelCase__ : Union[str, Any] = 1 lowerCAmelCase__ : List[str] = None lowerCAmelCase__ : List[str] = pipe(**a ).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(unet=a , scheduler=a ) pipe.to(torch_device=a , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : str = self.get_inputs(get_fixed_latents=a , device=a ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ): lowerCAmelCase__ : Dict = pipe(**a ).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : str = image[0, -3:, -3:, -1] lowerCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : Dict = ConsistencyModelPipeline(unet=a , scheduler=a ) 
pipe.to(torch_device=a , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Any = self.get_inputs(get_fixed_latents=a , device=a ) lowerCAmelCase__ : List[str] = 1 lowerCAmelCase__ : str = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ): lowerCAmelCase__ : List[str] = pipe(**a ).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1] lowerCAmelCase__ : Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
69
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the hyper-parameters and helper config used by the image-processor tests.

    ``parent`` is the owning ``TestCase`` so assertion helpers stay in one place.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Default target size used by the processor under test.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class A__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Exercises the image processor with PIL, numpy and torch inputs."""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        # The processor must expose every configurable attribute.
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
707
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class A__(Dataset):
    """Dataset of pre-tokenized sequences for language-model distillation.

    Stores token-id arrays together with their lengths, cleans the data on
    construction (splitting over-long sequences, dropping too-short ones and
    sequences dominated by unknown tokens), and provides ``batch_sequences``
    for padding a batch into rectangular tensors.
    """

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Consistency check: one length per sequence, and lengths accurate."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than ``max_model_input_size`` into chunks.

        Each chunk gets the boundary special tokens re-added so every stored
        sequence starts with CLS/BOS and ends with SEP/EOS.
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            # Successive n-sized chunks of l.
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the boundary tokens added back below.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences where at least 50% of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Pad a list of ``(token_ids, length)`` pairs into rectangular tensors.

        Returns ``(tk_t, lg_t)``: token ids of shape (bs, max_seq_len) and
        lengths of shape (bs,).
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids: MLM-style models pad with pad_token, CLM-style with unk_token.
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
69
0
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class A__:
    """Shared harness exercising a ``PretrainedConfig`` subclass.

    Round-trips the config through JSON strings/files and
    ``save_pretrained``/``from_pretrained`` and checks the common properties.
    ``parent`` is the owning ``unittest.TestCase`` providing assert helpers;
    extra ``kwargs`` become the inputs used to instantiate the config.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        # Composite configs require sub-configs and cannot be built bare.
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
708
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, 
is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
69
0
def lowerCAmelCase__(list_data: list, length: int = 0) -> list:
    """Sort ``list_data`` ascending, in place, via recursive early-exit bubble sort.

    Each pass bubbles the largest remaining element to the end of the active
    prefix; if a pass performs no swap, the prefix is already sorted and the
    recursion stops.

    :param list_data: list of mutually comparable items (mutated in place)
    :param length: length of the prefix still to sort; 0 means the whole list
    :return: the same list object, sorted ascending

    >>> lowerCAmelCase__([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> lowerCAmelCase__([])
    []
    """
    # 0 (the default) is falsy, so the first call sorts the whole list.
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # Swap adjacent out-of-order neighbours.
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # No swap this pass -> sorted; otherwise the largest element now sits at
    # index length-1, so recurse on the one-shorter prefix.
    return list_data if not swapped else lowerCAmelCase__(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
709
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class A__(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer backed by a *tokenizers* JSON file.

    Mirrors the slow sentencepiece-based tokenizer; ``save_vocabulary`` copies
    the sentencepiece model file next to the saved tokenizer so the slow
    tokenizer can be rebuilt from the save directory.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Without the sentencepiece model file the slow tokenizer cannot be saved.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """``[CLS] A [SEP]`` for a single sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
0
import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class A__ : @staticmethod def _lowerCamelCase ( *a : List[Any] , **a : Tuple ): '''simple docstring''' pass def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]: lowerCAmelCase__ : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any: lowerCAmelCase__ : Tuple = np.array(snake_case_ ) lowerCAmelCase__ : int = npimg.shape return {"hash": hashimage(snake_case_ ), "shape": shape} @is_pipeline_test @require_vision @require_torch class A__ ( unittest.TestCase ): lowercase = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) lowercase = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _lowerCamelCase ( self : int , a : Any , a : str , a : Tuple ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = MaskGenerationPipeline(model=_a , image_processor=_a ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _lowerCamelCase ( self : Tuple , a : str , a : int ): '''simple docstring''' pass @require_tf @unittest.skip('Image segmentation not implemented in TF' ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' pass @slow @require_torch def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = pipeline('mask-generation' , model='facebook/sam-vit-huge' ) lowerCAmelCase__ : Any = 
image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 ) # Shortening by hashing lowerCAmelCase__ : int = [] for i, o in enumerate(outputs['masks'] ): new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(_a , decimals=4 ) , [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7}, {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3}, {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9}, {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9}, {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4}, {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6}, {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2}, {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2}, {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2}, {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6}, {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9}, {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3}, {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4}, {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3}, {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3}, {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 
0.9_4_0_8}, {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5}, {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6}, {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2}, {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9}, {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6}, {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4}, {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3}, {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1} ] , ) # fmt: on @require_torch @slow def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Dict = """facebook/sam-vit-huge""" lowerCAmelCase__ : List[Any] = pipeline('mask-generation' , model=_a ) lowerCAmelCase__ : Dict = image_segmenter( 'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing lowerCAmelCase__ : Optional[int] = [] for i, o in enumerate(outputs['masks'] ): new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(_a , decimals=4 ) , [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3}, ] , )
710
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class A__(AbstractDatasetInputStream):
    """Dataset input stream that builds a dataset from a Python generator.

    Thin wrapper around the packaged ``Generator`` builder: the constructor
    configures the builder and :meth:`read` materializes either a streaming
    or a regular (map-style) ``train`` split.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # The builder performs the actual example generation and caching.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or map-style)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            # No download customization is exposed here; builder defaults apply.
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
69
0
from queue import PriorityQueue from typing import Any import numpy as np def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> float | int: for nxt, d in graph[v]: if nxt in visited_forward: continue lowerCAmelCase__ : List[str] = cst_fwd.get(__UpperCamelCase , np.inf ) lowerCAmelCase__ : str = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) lowerCAmelCase__ : Any = new_cost_f lowerCAmelCase__ : str = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: lowerCAmelCase__ : str = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: lowerCAmelCase__ : Optional[int] = -1 lowerCAmelCase__ : int = set() lowerCAmelCase__ : Optional[int] = set() lowerCAmelCase__ : Tuple = {source: 0} lowerCAmelCase__ : List[Any] = {destination: 0} lowerCAmelCase__ : Any = {source: None} lowerCAmelCase__ : Dict = {destination: None} lowerCAmelCase__ : PriorityQueue[Any] = PriorityQueue() lowerCAmelCase__ : PriorityQueue[Any] = PriorityQueue() lowerCAmelCase__ : int = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): lowerCAmelCase__ : Any = queue_forward.get() visited_forward.add(__UpperCamelCase ) lowerCAmelCase__ : Optional[int] = queue_backward.get() visited_backward.add(__UpperCamelCase ) lowerCAmelCase__ : List[Any] = pass_and_relaxation( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) lowerCAmelCase__ : Dict = pass_and_relaxation( __UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: lowerCAmelCase__ : Tuple = shortest_distance return shortest_path_distance lowerCamelCase__ = { 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } lowerCamelCase__ = { 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
711
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


lowerCamelCase__ = logging.get_logger(__name__)


class A__(SequenceFeatureExtractor):
    """Audio feature extractor: turns raw waveforms into batched, padded
    log-mel spectrogram patches (`audio_values`) plus a patch-level
    attention mask (`audio_mask`).
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length: int = 2048,
        num_channels: int = 1,
        patch_size: List[int] = [16, 16],
        feature_size: int = 128,
        sampling_rate: int = 44100,
        hop_length_to_sampling_rate: int = 86,
        n_fft: int = 2048,
        padding_value: float = 0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute a normalized log-mel spectrogram (values in [-1, 1])."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several mono waveforms into a padded batch.

        Raises ValueError if `sampling_rate` disagrees with the extractor's
        configured rate, or if multi-channel audio is supplied.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            lowerCamelCase__.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
69
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


lowerCamelCase__ = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """moussaKam/mbarthez""": 1024,
    """moussaKam/barthez""": 1024,
    """moussaKam/barthez-orangesum-title""": 1024,
}

SPIECE_UNDERLINE = """▁"""


class A__(PreTrainedTokenizer):
    """SentencePiece-based BARThez tokenizer with fairseq-style special-token
    ids (`<s>`=0, `<pad>`=1, `</s>`=2, `<unk>`=3) overlaid on the spm vocab.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file: str,
        bos_token: str = "<s>",
        eos_token: str = "</s>",
        sep_token: str = "</s>",
        cls_token: str = "<s>",
        unk_token: str = "<unk>",
        pad_token: str = "<pad>",
        mask_token: str = "<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word: include preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_offset = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """`<s> A </s>` for a single sequence, `<s> A </s></s> B </s>` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARThez does not use token types: the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Token -> id, with fairseq overrides taking precedence; a 0 spm id
        (unknown piece) maps to the unk token id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Detokenize, decoding runs of non-special pieces with sentencepiece."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and re-load on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the sentencepiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            lowerCamelCase__.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
712
import unittest

from transformers import DonutProcessor


lowerCamelCase__ = """naver-clova-ix/donut-base"""


class A__(unittest.TestCase):
    """Tests DonutProcessor's token-sequence -> JSON conversion."""

    def setUp(self):
        # Loads the pretrained processor once per test (network/cache access).
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase__)

    def test_token2json(self):
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
69
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy import structure: submodule name -> public names it exports.
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: only expose the configuration objects.
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
713
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the Gaussian (normal) probability density at *x*.

    :param x: point at which to evaluate the density (scalar or numpy array)
    :param mu: mean of the distribution
    :param sigma: standard deviation (must be non-zero)

    >>> gaussian(0)
    0.3989422804014327
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
69
0
import os


def solution(grid=None) -> int:
    """Project Euler 11: greatest product of four adjacent numbers in a
    20x20 grid (horizontally, vertically, or diagonally).

    :param grid: optional 20x20 list of ints; when ``None`` (the default,
        preserving the original behavior) the grid is read from ``grid.txt``
        next to this file.
    """
    if grid is None:
        with open(os.path.dirname(__file__) + '/grid.txt') as f:
            grid = [[int(x) for x in f.readline().split()] for _ in range(20)]

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
714
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class A__(TokenizerTesterMixin, unittest.TestCase):
    """XLM tokenizer tests on a tiny hand-built BPE vocab/merge set."""

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        """'lower' should split into the BPE pieces ['low', 'er</w>']."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
69
0
import random


def partition(a, left_index, right_index):
    """Lomuto-style partition of ``a[left_index:right_index]`` around
    ``a[left_index]``; returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a, left, right):
    """In-place quicksort of ``a[left:right]`` with a random pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
715
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    """Convert a PIL image to a [-1, 1] float tensor of shape (1, C, H, W),
    cropping H and W down to multiples of 32."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class A__(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline: noise latents are denoised
    conditioned on the low-resolution input, then decoded by a VQ-VAE."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run super-resolution on `image`; returns an ImagePipelineOutput
        (or a plain tuple when `return_dict=False`)."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}''')

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
69
0
"""Tests for the TimmBackbone wrapper model."""
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class TimmBackboneModelTester:
    """Builds small configs/inputs for TimmBackbone model tests."""

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class A__(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side')
    def test_initialization(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_model_common_attributes(self):
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_save_load(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tie_model_weights(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_model_weights_reload_no_save(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
    def test_channels(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.')
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip('Safetensors is not supported by timm.')
    def test_can_use_safetensors(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
716
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class A__(Dataset):
    """Abstracts a CNN/DailyMail-style story dataset.

    Each file in `path` holds one story: article text followed by
    ``@highlight``-prefixed summary lines. Files whose name contains
    "summary" are skipped.
    """

    def __init__(self, path="", prefix="train"):
        # `prefix` is kept for split-selection API compatibility; only `path`
        # is used here. Fixed: parameters were both mangled to `a` (SyntaxError)
        # and locals were assigned to a placeholder name then read by their
        # original names (NameError).
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a raw story string into (article lines, summary lines).

    Summary lines are the ``@highlight``-prefixed tail of the file; if no
    ``@highlight`` marker exists, all lines are treated as article text and
    the summary is empty.
    """
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines; everything left in the deque after the first
    # "@highlight" marker, minus the markers themselves
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    """Append a period to `line` unless it already ends with an end token."""
    # NOTE: the original list contained "\u2019" twice; the second entry is
    # restored to "\u201d" (right double quotation mark), matching the set of
    # closing-quote end tokens this list clearly intends to cover.
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate `sequence` to `block_size`, or right-pad it with `pad_token_id`.

    Note: pads the input list in place when it is shorter than `block_size`.
    """
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Return an attention mask: 1 for real tokens, 0 at `pad_token_id` positions."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Tokenize story and summary lines and flatten each into one id sequence."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate segment ids (0/1) per sentence, switching at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
69
0
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

# NOTE(review): these `WavaVeca*` import names look mangled from `Wav2Vec2*`
# but are used consistently below, so they are kept as-is — confirm against
# the installed `transformers` version.
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq state-dict key fragment -> HF module path (`*` = encoder layer index)
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the submodule of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which attribute (weight/weight_g/weight_v/bias) is
    written; `full_name` is only used for shape-check messages and logging.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every tensor of the fairseq state dict onto `hf_model`, logging leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # fine-tuned (CTC) models wrap the encoder under a `sew` attribute
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # recover the layer index from the fairseq key
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq feature-extractor conv/layer-norm tensor into `feature_extractor`."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    """Build a `SEWConfig` from the fairseq model's own configuration object."""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    # fairseq stores the conv feature layers as a stringified list of tuples
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq SEW checkpoint into the transformers design.

    Saves the converted model (and, for fine-tuned checkpoints with a dict,
    a processor) under `pytorch_dump_folder_path`.
    """
    if is_finetuned:
        model = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
717
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that are exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link matches it, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every (non-exempt) config class lacking a valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
69
0
"""Common test helper that exercises the `PretrainedConfig` contract."""
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class A__:
    """Runs the shared configuration checks (serialization round-trips, common
    properties, num_labels handling, argument init) for one config class.

    Fixed: all methods were mangled to the same name `_lowerCamelCase` (each
    shadowing the previous) while `run_common_tests` calls them by their real
    names; `__init__` had duplicate `a` parameters (a SyntaxError); and locals
    were assigned to placeholder names but read by their originals.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        # remaining kwargs become the constructor inputs for the config class
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configs_path)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        # `idalabel` / `labelaid` were mangled forms of `id2label` / `label2id`
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    # `floataa` was a mangled form of `float16`
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
718
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
69
0
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): lowerCAmelCase__ : List[Any] = F'''Input value of [number={number}] must be an integer''' raise TypeError(_lowerCamelCase ) if number < 1: lowerCAmelCase__ : str = F'''Input value of [number={number}] must be > 0''' raise ValueError(_lowerCamelCase ) lowerCAmelCase__ : Dict = 1 for i in range(1 , _lowerCamelCase ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
719
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase__ = { """configuration_chinese_clip""": [ """CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ChineseCLIPConfig""", """ChineseCLIPOnnxConfig""", """ChineseCLIPTextConfig""", """ChineseCLIPVisionConfig""", ], """processing_chinese_clip""": ["""ChineseCLIPProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""ChineseCLIPFeatureExtractor"""] lowerCamelCase__ = ["""ChineseCLIPImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """ChineseCLIPModel""", """ChineseCLIPPreTrainedModel""", """ChineseCLIPTextModel""", """ChineseCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
69
0
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stand-in for PIL's `Image` so references below don't fail without vision deps."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    """Return the MD5 hex digest of an image's raw bytes (fingerprints pipeline output)."""
    # Fixed: `hashlib.mda` (no such attribute) and the digest object being
    # assigned to a placeholder name while `m` was returned.
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A__(unittest.TestCase):
    # mapping consumed by the common pipeline-test machinery
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
720
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of ``n`` words (split on ``character``)."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split each (title, text) document into 100-word passages, keeping the title."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute DPR embeddings for a batch of passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    """Build a RAG knowledge dataset (passages + DPR embeddings) and a Faiss HNSW index."""
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation:
    # https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
69
0
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_) -> tuple[int, int]:
    """Convert a decimal number (or numeric string) to a reduced fraction.

    Returns ``(numerator, denominator)``. Raises ValueError for non-numeric input.
    """
    try:
        # fixed: previously ``float()`` was applied to the function object, not the argument
        decimal = float(SCREAMING_SNAKE_CASE_)
    except ValueError:
        raise ValueError("Please enter a valid number") from None
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    number_of_frac_digits = len(str(decimal).split(".")[1])
    numerator = int(decimal * (10**number_of_frac_digits))
    denominator = 10**number_of_frac_digits
    # Euclid's algorithm: after the loop ``divisor`` holds gcd(numerator, denominator).
    dividend, divisor = denominator, numerator
    while True:
        remainder = dividend % divisor
        if remainder == 0:
            break
        dividend, divisor = divisor, remainder
    # fixed: integer division instead of float true-division (lossless for big values)
    return numerator // divisor, denominator // divisor


# Readable alias; the demo below (and external callers) use this name.
decimal_to_fraction = lowerCAmelCase__


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # raises ValueError by design
721
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class A__(SchedulerCommonTest):  # fixed: base class was the undefined name ``__magic_name__``
    # Hook read by SchedulerCommonTest helpers (``self.scheduler_classes[0]`` below).
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        """Posterior variance at representative timesteps matches reference values."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        """``batch_step_no_noise`` on a stacked batch matches reference statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        """Full reverse-diffusion loop (epsilon prediction) matches reference statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Full reverse-diffusion loop with v-prediction matches reference statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        """``previous_timestep`` walks a custom descending schedule correctly (last -> -1)."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]  # 51 after 50 -> not descending
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
69
0
import unittest

from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class A__(TokenizerTesterMixin, unittest.TestCase):  # fixed: base was the undefined ``__magic_name__``
    # Hooks read by TokenizerTesterMixin.
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Token <-> id round trip for a known special token."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1_006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # XLNet appends <sep> (4) and <cls> (3) at the end of each sequence.
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # Padded batch of three sentences; pad id is 5, sep 4, cls 3.
        # Runs of identical values are built with list multiplication for readability;
        # the resulting values are identical to the original inline literal.
        expected_encoding = {
            "input_ids": [
                [17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17,
                 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10,
                 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229,
                 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353,
                 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431,
                 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218,
                 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3],
                [5] * 79
                + [322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68,
                   1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3],
                [5] * 97 + [32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3],
            ],
            "token_type_ids": [
                [0] * 110 + [2],
                [3] * 79 + [0] * 31 + [2],
                [3] * 97 + [0] * 13 + [2],
            ],
            "attention_mask": [
                [1] * 111,
                [0] * 79 + [1] * 32,
                [0] * 97 + [1] * 14,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
700
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class A__(ProcessorMixin):  # fixed: base class was the undefined name ``__magic_name__``
    """Combines a LayoutLMv3 image processor and tokenizer into a single processor."""

    # Hooks read by ProcessorMixin.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated ``feature_extractor`` kwarg if no image processor was given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (optionally with OCR) then the tokenizer, and merge outputs."""
        # verify input: OCR-derived words/boxes are incompatible with user-supplied ones
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once per overflow chunk produced for its sample."""
        # fixed: this method is called as ``get_overflowing_images`` above but was
        # previously defined under a different (generated) name.
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
69
0
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

# fixed: these module constants were all bound to one generated name, leaving the
# class-attribute references below undefined.
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class A__(PreTrainedTokenizerFast):  # fixed: base class was the undefined name ``snake_case_``
    """Fast (tokenizers-backed) BLOOM tokenizer; no slow counterpart exists."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Keep the backend pre-tokenizer's add_prefix_space in sync with the argument.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate conversation turns, each terminated by EOS, truncating from the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
701
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester(object):
    """Builds a tiny SqueezeBert config plus random inputs and verifies the
    output shapes produced by the base model and every task head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels sized by the tester, plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate the sequence once per choice: (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the common model/pipeline test suites against SqueezeBert."""

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        """Pin the MNLI head logits of the released squeezebert-mnli checkpoint."""
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
69
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Always-importable members: configuration and the slow tokenizer.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

# PyTorch models are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

# TensorFlow models are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported when first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
702
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised arithmetic expression with Dijkstra's
    two-stack algorithm.

    Only single-digit operands and the operators ``* / + -`` are supported,
    and every binary operation must be wrapped in parentheses, e.g.
    ``"(5 + ((4 * 2) * (2 + 3)))"`` -> 45.

    :param equation: fully parenthesised expression
    :return: the evaluated result
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: an operand is pushed onto the operand stack.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: an operator is pushed onto the operator stack.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, pop one operator and two
            # operands, apply the operator, and push the result back.
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the single remaining operand is the value of the expression.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
69
0
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )


class GPTNeoXModelTester:
    """Builds a tiny GPT-NeoX config plus random inputs and verifies output
    shapes, decoder caching, and each task head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Last vocab id doubles as the padding token.
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        """Check that generating with a KV cache matches the no-cache path."""
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the common model/generation/pipeline suites against GPT-NeoX."""

    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        """RoPE scaling must change long-input outputs; dynamic scaling must
        leave short inputs untouched."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        """Greedy generation of pythia-410m-deduped, with and without
        gradient checkpointing, must reproduce the pinned continuation."""
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
703
import numpy


class TwoHiddenLayerNeuralNetwork:
    """A toy fully connected network with two hidden layers (4 and 3 nodes)
    and one output node, trained by plain gradient descent on sigmoid
    activations."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """
        :param input_array: training inputs, shape (samples, features)
        :param output_array: expected outputs, shape (samples, 1)
        """
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in the previous layer and second argument is the
        # number of nodes in the next layer.

        # Input layer (input_array.shape[1] nodes) -> first hidden layer (4 nodes).
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # First hidden layer (4 nodes) -> second hidden layer (3 nodes).
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Second hidden layer (3 nodes) -> output layer (1 node).
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values; zeros until train() runs.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate `self.input_array` through the network and return the
        output-layer activations. The intermediate layer activations are
        cached on `self` for use by back_propagation()."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Apply one gradient-descent step, updating all three weight
        matrices from the error (output_array - predicted_output)."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run `iterations` feedforward/backprop cycles.

        :param output: expected outputs, used only for loss reporting
        :param iterations: number of training passes
        :param give_loss: if True, print the mean-squared loss each iteration
        """
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Classify a single sample: 1 if the network output exceeds 0.6,
        otherwise 0."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid activation: 1 / (1 + e^-x)."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of its output value."""
    return (value) * (1 - (value))


def example() -> int:
    """Train on the 3-bit truth table below for 10 iterations and predict
    the class of input (1, 1, 1)."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
69
0
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ ( unittest.TestCase ): def __init__( self : Any , a : List[Any] , a : List[str]=3 , a : Union[str, Any]=32 , a : List[str]=3 , a : Any=10 , a : str=[10, 20, 30, 40] , a : Any=[1, 1, 2, 1] , a : Any=True , a : str=True , a : Dict="relu" , a : Tuple=3 , a : List[Any]=None , ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Dict = image_size lowerCAmelCase__ : Tuple = num_channels lowerCAmelCase__ : Tuple = embeddings_size lowerCAmelCase__ : Union[str, Any] = hidden_sizes lowerCAmelCase__ : Tuple = depths lowerCAmelCase__ : Union[str, Any] = is_training lowerCAmelCase__ : List[str] = use_labels lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : Dict = num_labels lowerCAmelCase__ : Dict = scope lowerCAmelCase__ : Union[str, Any] = len(_A ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : int = self.get_config() return config, pixel_values def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , 
image_size=self.image_size , ) def _lowerCamelCase ( self : str , a : Union[str, Any] , a : str ): '''simple docstring''' lowerCAmelCase__ : Tuple = FlaxRegNetModel(config=_A ) lowerCAmelCase__ : int = model(_A ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : str ): '''simple docstring''' lowerCAmelCase__ : int = self.num_labels lowerCAmelCase__ : Any = FlaxRegNetForImageClassification(config=_A ) lowerCAmelCase__ : str = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ : Any = config_and_inputs lowerCAmelCase__ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class A__ ( __lowercase , unittest.TestCase ): lowercase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase = False lowercase = False lowercase = False def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = FlaxRegNetModelTester(self ) lowerCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A ) def _lowerCamelCase ( self : Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self : List[str] ): '''simple docstring''' return def _lowerCamelCase ( self : 
Optional[int] ): '''simple docstring''' lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @unittest.skip(reason='RegNet does not use inputs_embeds' ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : List[str] = model_class(_A ) lowerCAmelCase__ : Union[str, Any] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : List[Any] = [*signature.parameters.keys()] lowerCAmelCase__ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' def check_hidden_states_output(a : Optional[int] , a : Union[str, Any] , a : Optional[int] ): lowerCAmelCase__ : Dict = model_class(_A ) lowerCAmelCase__ : Optional[int] = model(**self._prepare_for_class(_A , _A ) ) lowerCAmelCase__ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(_A ) , expected_num_stages + 1 ) lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Tuple = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] lowerCAmelCase__ : int = True check_hidden_states_output(_A , _A , _A ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ : List[Any] = self._prepare_for_class(_A , _A ) lowerCAmelCase__ : Optional[int] = model_class(_A ) @jax.jit def model_jitted(a : Any , **a : Dict ): return model(pixel_values=_A , **_A ) with self.subTest('JIT Enabled' ): lowerCAmelCase__ : Optional[Any] = model_jitted(**_A ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): lowerCAmelCase__ : Optional[int] = model_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase__ ( ) -> Dict: lowerCAmelCase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_flax class A__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : str ): '''simple docstring''' return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None @slow def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' ) lowerCAmelCase__ : Any = self.default_image_processor lowerCAmelCase__ : Tuple = prepare_img() lowerCAmelCase__ : List[Any] = image_processor(images=_A , return_tensors='np' ) lowerCAmelCase__ : Tuple = model(**_A ) # verify the logits lowerCAmelCase__ : Union[str, Any] = (1, 1_000) self.assertEqual(outputs.logits.shape , _A ) lowerCAmelCase__ : Optional[Any] = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
704
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ): '''simple docstring''' lowerCAmelCase__ : str = parent lowerCAmelCase__ : Union[str, Any] = batch_size lowerCAmelCase__ : List[str] = image_size lowerCAmelCase__ : Optional[Any] = patch_size lowerCAmelCase__ : Tuple = num_channels lowerCAmelCase__ : Optional[int] = embed_dim lowerCAmelCase__ : Tuple = depths lowerCAmelCase__ : List[str] = num_heads lowerCAmelCase__ : List[Any] = window_size lowerCAmelCase__ : Any = mlp_ratio lowerCAmelCase__ : Optional[Any] = qkv_bias lowerCAmelCase__ : Any = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : int = drop_path_rate lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : int = 
use_absolute_embeddings lowerCAmelCase__ : List[str] = patch_norm lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = initializer_range lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : List[Any] = scope lowerCAmelCase__ : Dict = use_labels lowerCAmelCase__ : List[Any] = type_sequence_label_size lowerCAmelCase__ : Optional[Any] = encoder_stride def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : Optional[Any] = None if self.use_labels: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : int = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self : List[str] ): '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = model(a ) lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ): '''simple docstring''' lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : str = model(a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ : List[str] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ): '''simple docstring''' lowerCAmelCase__ : str = self.type_sequence_label_size lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a ) model.to(a ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs lowerCAmelCase__ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowercase = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] 
= SwinvaModelTester(self ) lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' ) def _lowerCamelCase ( self : Any ): '''simple docstring''' pass @unittest.skip(reason='Swinv2 does not use inputs_embeds' ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Optional[int] = model_class(a ) lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Tuple = [*signature.parameters.keys()] lowerCAmelCase__ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _lowerCamelCase 
( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : str = False lowerCAmelCase__ : List[Any] = True lowerCAmelCase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Dict = outputs.attentions lowerCAmelCase__ : Dict = len(self.model_tester.depths ) self.assertEqual(len(a ) , a ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : Optional[int] = config.window_size**2 lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) lowerCAmelCase__ : Tuple = len(a ) # Check attention is always last and order is fine lowerCAmelCase__ : str = True lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) if hasattr(self.model_tester , 'num_hidden_states_types' ): lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCAmelCase__ : Any = 2 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowerCAmelCase__ : Dict = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, 
window_size_squared] , ) def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ): '''simple docstring''' lowerCAmelCase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.hidden_states lowerCAmelCase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swinv2 has a different seq_length lowerCAmelCase__ : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(a ) , a ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape lowerCAmelCase__ : List[str] = ( reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : List[str] = True 
self.check_hidden_states_output(a , a , a , a ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Any = 3 lowerCAmelCase__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase__ : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase__ : str = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a ) self.assertIsNotNone(a ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = _config_zero_init(a ) for model_class in 
self.all_model_classes: lowerCAmelCase__ : int = model_class(config=a ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class A__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to( a ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**a ) # verify the logits lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
69
0
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed lowerCamelCase__ = logging.getLogger(__name__) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 2 ) -> int: def get_dataset(SCREAMING_SNAKE_CASE_ ): lowerCAmelCase__ : List[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(lowerCAmelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) lowerCAmelCase__ : Tuple = get_dataset(lowerCAmelCase__ ) lowerCAmelCase__ : Any = get_dataset(lowerCAmelCase__ ) lowerCAmelCase__ : Optional[Any] = DataLoader(lowerCAmelCase__ , shuffle=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , num_workers=4 ) lowerCAmelCase__ : Any = DataLoader(lowerCAmelCase__ , shuffle=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]: lowerCAmelCase__ : Union[str, Any] = [] for epoch in range(lowerCAmelCase__ ): # Train quickly model.train() for batch in dataloader: lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = batch lowerCAmelCase__ : Optional[Any] = model(lowerCAmelCase__ ) lowerCAmelCase__ : Tuple = torch.nn.functional.mse_loss(lowerCAmelCase__ , lowerCAmelCase__ ) accelerator.backward(lowerCAmelCase__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class A__ ( nn.Module ): def __init__( self : Any ): 
'''simple docstring''' super().__init__() lowerCAmelCase__ : str = nn.Parameter(torch.randn(1 ) ) lowerCAmelCase__ : str = nn.Parameter(torch.randn(1 ) ) def _lowerCamelCase ( self : List[Any] , a : Any ): '''simple docstring''' return x * self.a + self.b class A__ ( unittest.TestCase ): def _lowerCamelCase ( self : int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) lowerCAmelCase__ : Tuple = DummyModel() lowerCAmelCase__ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = dummy_dataloaders() lowerCAmelCase__ : Tuple = ProjectConfiguration(total_limit=1 , project_dir=snake_case__ , automatic_checkpoint_naming=snake_case__ ) # Train baseline lowerCAmelCase__ : Dict = Accelerator(project_config=snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) lowerCAmelCase__ : List[str] = DummyModel() lowerCAmelCase__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase__ , lowerCAmelCase__ : Any = dummy_dataloaders() # Train baseline lowerCAmelCase__ : Dict = Accelerator() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save initial lowerCAmelCase__ : Optional[int] = os.path.join(snake_case__ , 'initial' ) accelerator.save_state(snake_case__ ) ((lowerCAmelCase__) , (lowerCAmelCase__)) : Optional[int] = model.a.item(), model.b.item() lowerCAmelCase__ : Tuple = optimizer.state_dict() lowerCAmelCase__ : Optional[int] = train(3 , 
snake_case__ , snake_case__ , snake_case__ , snake_case__ ) ((lowerCAmelCase__) , (lowerCAmelCase__)) : Optional[Any] = model.a.item(), model.b.item() lowerCAmelCase__ : int = optimizer.state_dict() # Train partially set_seed(42 ) lowerCAmelCase__ : Dict = DummyModel() lowerCAmelCase__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = dummy_dataloaders() lowerCAmelCase__ : Optional[int] = Accelerator() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) accelerator.load_state(snake_case__ ) ((lowerCAmelCase__) , (lowerCAmelCase__)) : Dict = model.a.item(), model.b.item() lowerCAmelCase__ : List[str] = optimizer.state_dict() self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) lowerCAmelCase__ : Union[str, Any] = train(2 , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save everything lowerCAmelCase__ : int = os.path.join(snake_case__ , 'checkpoint' ) accelerator.save_state(snake_case__ ) # Load everything back in and make sure all states work accelerator.load_state(snake_case__ ) test_rands += train(1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) ((lowerCAmelCase__) , (lowerCAmelCase__)) : Union[str, Any] = model.a.item(), model.b.item() lowerCAmelCase__ : Optional[Any] = optimizer.state_dict() self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) lowerCAmelCase__ : str = DummyModel() lowerCAmelCase__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase__ , lowerCAmelCase__ : 
Optional[Any] = dummy_dataloaders() lowerCAmelCase__ : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=snake_case__ ) # Train baseline lowerCAmelCase__ : List[str] = Accelerator(project_dir=snake_case__ , project_config=snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save initial accelerator.save_state() ((lowerCAmelCase__) , (lowerCAmelCase__)) : List[str] = model.a.item(), model.b.item() lowerCAmelCase__ : Any = optimizer.state_dict() lowerCAmelCase__ : List[str] = train(3 , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) ((lowerCAmelCase__) , (lowerCAmelCase__)) : Optional[int] = model.a.item(), model.b.item() lowerCAmelCase__ : Dict = optimizer.state_dict() # Train partially set_seed(42 ) lowerCAmelCase__ : Union[str, Any] = DummyModel() lowerCAmelCase__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase__ , lowerCAmelCase__ : str = dummy_dataloaders() lowerCAmelCase__ : Any = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=snake_case__ ) lowerCAmelCase__ : int = Accelerator(project_dir=snake_case__ , project_config=snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) accelerator.load_state(os.path.join(snake_case__ , 'checkpoints' , 'checkpoint_0' ) ) ((lowerCAmelCase__) , (lowerCAmelCase__)) : Tuple = model.a.item(), model.b.item() lowerCAmelCase__ : List[Any] = optimizer.state_dict() self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) lowerCAmelCase__ : List[str] = train(2 , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work 
accelerator.load_state(os.path.join(snake_case__ , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) ((lowerCAmelCase__) , (lowerCAmelCase__)) : Any = model.a.item(), model.b.item() lowerCAmelCase__ : Any = optimizer.state_dict() self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Tuple = torch.tensor([1, 2, 3] ) lowerCAmelCase__ : Tuple = torch.tensor([2, 3, 4] ) lowerCAmelCase__ : List[str] = DummyModel() lowerCAmelCase__ : List[str] = torch.optim.Adam(net.parameters() ) lowerCAmelCase__ : Dict = Accelerator() with self.assertRaises(snake_case__ ) as ve: accelerator.register_for_checkpointing(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase__ : str = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) lowerCAmelCase__ : Optional[Any] = DummyModel() lowerCAmelCase__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) lowerCAmelCase__ : Any = torch.optim.lr_scheduler.StepLR(snake_case__ , step_size=1 , gamma=0.9_9 ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = dummy_dataloaders() lowerCAmelCase__ : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=snake_case__ ) # Train baseline lowerCAmelCase__ : Tuple = Accelerator(project_dir=snake_case__ , project_config=snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ 
, snake_case__ , snake_case__ ) # Save initial accelerator.save_state() lowerCAmelCase__ : Optional[int] = scheduler.state_dict() train(3 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.assertNotEqual(snake_case__ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(snake_case__ , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(snake_case__ , scheduler.state_dict() ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) lowerCAmelCase__ : List[str] = DummyModel() lowerCAmelCase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=snake_case__ , total_limit=2 ) # Train baseline lowerCAmelCase__ : str = Accelerator(project_dir=snake_case__ , project_config=snake_case__ ) lowerCAmelCase__ : Optional[Any] = accelerator.prepare(snake_case__ ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(snake_case__ , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(snake_case__ , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(snake_case__ , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : int = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCamelCase__ = '''/tmp/accelerate/state_checkpointing''' lowerCamelCase__ = DummyModel() lowerCamelCase__ = torch.optim.Adam(params=model.parameters(), lr=1E-3) lowerCamelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) lowerCamelCase__ = dummy_dataloaders() lowerCamelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline lowerCamelCase__ = 
Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) lowerCamelCase__ = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) lowerCamelCase__ = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: lowerCamelCase__ = group['''params'''][0].device break assert param_device.type == accelerator.device.type lowerCamelCase__ = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""") for group in optimizer.param_groups: lowerCamelCase__ = group['''params'''][0].device break assert ( param_device.type == torch.device("""cpu""").type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""") for group in optimizer.param_groups: lowerCamelCase__ = group['''params'''][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""): accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
705
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the digit tuple ``num`` (d1..d10) has Project Euler 43's
    substring-divisibility property.

    Requires d2d3d4 divisible by 2, d3d4d5 by 3, d4d5d6 by 5, and
    d5d6d7 .. d8d9d10 divisible by 7, 11, 13, 17 respectively.
    """
    # d4 even  <=>  the substring d2d3d4 is divisible by 2.
    if num[3] % 2 != 0:
        return False
    # Digit-sum rule: d3d4d5 divisible by 3.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d6 in {0, 5}  <=>  d4d5d6 divisible by 5.
    if num[5] % 5 != 0:
        return False
    # Remaining three-digit substrings against 7, 11, 13, 17 in order.
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0..(n-1) pandigital numbers with the
    substring-divisibility property (Project Euler problem 43).

    The original mangled version called ``map`` with the integer parameter
    instead of ``str`` and invoked undefined names; restored here.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"""{solution() = }""")
69
0
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
            NOTE: Perplexity can only be calculated for causal language models.
                    This includes models such as gpt2, causal variations of bert,
                    causal versions of t5, and more (the full list can be found
                    in the AutoModelForCausalLM documentation here:
                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A__(datasets.Metric):
    """Perplexity metric: exponentiated mean negative log-likelihood of each
    input text under a causal language model.

    The mangled original was unimportable: the module constants used by the
    decorator were all bound to one name, both methods were named
    ``_lowerCamelCase`` (so ``datasets.Metric``'s required ``_info`` /
    ``_compute`` hooks never existed), and ``_compute``'s signature repeated
    the parameter name ``a`` (a SyntaxError).  Restored below; all runtime
    strings are preserved verbatim.
    """

    def _info(self):
        # Metadata hook required by `datasets.Metric`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Compute per-text and mean perplexity of ``input_texts`` under ``model_id``.

        Returns:
            dict with keys ``"perplexities"`` (list of float, one per text)
            and ``"mean_perplexity"`` (float).
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors='pt',
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 1)
            ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none')

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                # prepend a BOS column so the first real token's probability counts
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # standard causal-LM shift: predict token t+1 from logits at t
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # per-sequence perplexity = exp(masked mean token NLL)
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
706
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


# Make kernel selection deterministic so the exact pixel-value assertions
# below are reproducible across runs.
enable_full_determinism()


# NOTE(review): identifiers in this file are machine-mangled: every class
# attribute is `lowercase`, every method `_lowerCamelCase`, every local
# `lowerCAmelCase__`, and parameters are all `a`.  Later definitions shadow
# earlier ones, one signature repeats the parameter `a` (a SyntaxError), and
# many locals (`unet`, `scheduler`, `pipe`, `image`, `generator`, ...) are
# read without being bound under those names, so the tests cannot run as
# written.  Restoring the real names safely requires the upstream file, so
# this review only documents intent; code tokens are left untouched.
class A__ ( __magic_name__ , unittest.TestCase ):
    # Pipeline under test plus the standard unconditional-generation
    # parameter sets consumed by the shared pipeline test mixin.
    lowercase = ConsistencyModelPipeline
    lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    lowercase = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ]
    )

    @property
    def _lowerCamelCase ( self : int ):
        '''Tiny unconditional test UNet fetched from the Hub.'''
        lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet

    @property
    def _lowerCamelCase ( self : List[str] ):
        '''Tiny class-conditional test UNet fetched from the Hub.'''
        lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet

    def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any]=False ):
        '''Assemble pipeline components (UNet + CM multistep scheduler).'''
        if class_cond:
            lowerCAmelCase__ : Tuple = self.dummy_cond_unet
        else:
            lowerCAmelCase__ : Dict = self.dummy_uncond_unet

        # Default to CM multistep sampler
        lowerCAmelCase__ : Optional[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )

        lowerCAmelCase__ : List[Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }

        return components

    # NOTE(review): duplicate parameter name `a` below is a SyntaxError kept
    # from the mangled original.
    def _lowerCamelCase ( self : int , a : Optional[int] , a : Any=0 ):
        '''Standard call kwargs for the dummy pipeline (two-step schedule [22, 0]).'''
        if str(a ).startswith('mps' ):
            lowerCAmelCase__ : List[str] = torch.manual_seed(a )
        else:
            lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )

        lowerCAmelCase__ : str = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }

        return inputs

    def _lowerCamelCase ( self : Optional[Any] ):
        '''Multistep unconditional sampling: checks output shape and a pixel slice.'''
        lowerCAmelCase__ : Any = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
        lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a )
        lowerCAmelCase__ : Tuple = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )

        lowerCAmelCase__ : str = self.get_dummy_inputs(a )
        lowerCAmelCase__ : str = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)

        lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Tuple = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def _lowerCamelCase ( self : List[str] ):
        '''Multistep class-conditional sampling (class label 0): same checks.'''
        lowerCAmelCase__ : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ : Tuple = self.get_dummy_components(class_cond=a )
        lowerCAmelCase__ : Union[str, Any] = ConsistencyModelPipeline(**a )
        lowerCAmelCase__ : Tuple = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )

        lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a )
        lowerCAmelCase__ : int = 0
        lowerCAmelCase__ : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)

        lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def _lowerCamelCase ( self : Any ):
        '''Single-step unconditional sampling (num_inference_steps=1, timesteps=None).'''
        lowerCAmelCase__ : Tuple = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ : Union[str, Any] = self.get_dummy_components()
        lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(**a )
        lowerCAmelCase__ : Dict = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )

        lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(a )
        lowerCAmelCase__ : Optional[Any] = 1
        lowerCAmelCase__ : Dict = None
        lowerCAmelCase__ : List[Any] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)

        lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def _lowerCamelCase ( self : Optional[Any] ):
        '''Single-step class-conditional sampling (class label 0).'''
        lowerCAmelCase__ : Dict = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ : Optional[int] = self.get_dummy_components(class_cond=a )
        lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a )
        lowerCAmelCase__ : Optional[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )

        lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a )
        lowerCAmelCase__ : Dict = 1
        lowerCAmelCase__ : Tuple = None
        lowerCAmelCase__ : Optional[Any] = 0
        lowerCAmelCase__ : str = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)

        lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Dict = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3


@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    '''Slow GPU integration tests against the full pretrained consistency model.'''

    def _lowerCamelCase ( self : Optional[Any] ):
        '''tearDown: free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # NOTE(review): duplicate `a` parameters below are SyntaxErrors kept from
    # the mangled original.
    def _lowerCamelCase ( self : Optional[Any] , a : Tuple=0 , a : Optional[Any]=False , a : Optional[Any]="cpu" , a : Union[str, Any]=torch.floataa , a : Dict=(1, 3, 64, 64) ):
        '''Call kwargs for the full pipeline; optionally pins fixed latents.'''
        lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )

        lowerCAmelCase__ : List[Any] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }

        if get_fixed_latents:
            lowerCAmelCase__ : Optional[int] = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            lowerCAmelCase__ : Tuple = latents

        return inputs

    def _lowerCamelCase ( self : str , a : Tuple=0 , a : Tuple="cpu" , a : Tuple=torch.floataa , a : str=(1, 3, 64, 64) ):
        '''Deterministic latents for a given seed/device/dtype/shape.'''
        if type(a ) == str:
            lowerCAmelCase__ : str = torch.device(a )
        lowerCAmelCase__ : List[str] = torch.Generator(device=a ).manual_seed(a )
        lowerCAmelCase__ : Any = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents

    def _lowerCamelCase ( self : str ):
        '''Multistep sampling with the pretrained ImageNet-64 L2 distilled model.'''
        lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase__ : List[str] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )

        lowerCAmelCase__ : Optional[Any] = self.get_inputs()
        lowerCAmelCase__ : Dict = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)

        lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def _lowerCamelCase ( self : str ):
        '''Single-step sampling with the pretrained model.'''
        lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase__ : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        lowerCAmelCase__ : Optional[int] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )

        lowerCAmelCase__ : List[str] = self.get_inputs()
        lowerCAmelCase__ : Union[str, Any] = 1
        lowerCAmelCase__ : List[str] = None
        lowerCAmelCase__ : List[str] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)

        lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    @require_torch_a
    def _lowerCamelCase ( self : List[str] ):
        '''Multistep fp16 sampling with fixed latents under torch 2.0 SDP flash kernels.'''
        lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )

        lowerCAmelCase__ : str = self.get_inputs(get_fixed_latents=a , device=a )

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            lowerCAmelCase__ : Dict = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)

        lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    @require_torch_a
    def _lowerCamelCase ( self : Optional[int] ):
        '''Single-step fp16 sampling with fixed latents under SDP flash kernels.'''
        lowerCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        lowerCAmelCase__ : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )

        lowerCAmelCase__ : Any = self.get_inputs(get_fixed_latents=a , device=a )
        lowerCAmelCase__ : List[str] = 1
        lowerCAmelCase__ : str = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            lowerCAmelCase__ : List[str] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)

        lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
69
0
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# NOTE(review): identifiers in this file are machine-mangled: every local is
# `lowerCAmelCase__`, every method `_lowerCamelCase` (later definitions shadow
# earlier ones), and parameters are all `a`.  Many names read in bodies
# (`parent`, `config`, `pixel_values`, `model`, `ConvNextVaModelTester`, ...)
# are never bound under those names, so the tests cannot run as written.
# Restoring the real names safely requires the upstream file, so this review
# only documents intent; code tokens are left untouched.
class A__ :
    '''Helper that builds tiny ConvNextV2 configs/inputs and runs shape checks.'''

    def __init__( self : Optional[Any] , a : List[Any] , a : Optional[int]=13 , a : Any=32 , a : Dict=3 , a : Tuple=4 , a : Union[str, Any]=[10, 20, 30, 40] , a : List[Any]=[2, 2, 3, 2] , a : Optional[Any]=True , a : List[Any]=True , a : Dict=37 , a : Optional[int]="gelu" , a : Optional[Any]=10 , a : Dict=0.0_2 , a : Optional[Any]=["stage2", "stage3", "stage4"] , a : Dict=[2, 3, 4] , a : Tuple=None , ):
        # Store the tiny-model hyperparameters used by all the checks below.
        lowerCAmelCase__ : List[Any] = parent
        lowerCAmelCase__ : Any = batch_size
        lowerCAmelCase__ : int = image_size
        lowerCAmelCase__ : Any = num_channels
        lowerCAmelCase__ : Dict = num_stages
        lowerCAmelCase__ : Union[str, Any] = hidden_sizes
        lowerCAmelCase__ : List[Any] = depths
        lowerCAmelCase__ : str = is_training
        lowerCAmelCase__ : str = use_labels
        lowerCAmelCase__ : int = intermediate_size
        lowerCAmelCase__ : Optional[int] = hidden_act
        lowerCAmelCase__ : Tuple = num_labels
        lowerCAmelCase__ : int = initializer_range
        lowerCAmelCase__ : Optional[int] = out_features
        lowerCAmelCase__ : Tuple = out_indices
        lowerCAmelCase__ : List[Any] = scope

    def _lowerCamelCase ( self : Any ):
        '''Random pixel_values (+ labels when use_labels) and a config.'''
        lowerCAmelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowerCAmelCase__ : str = None
        if self.use_labels:
            lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )

        lowerCAmelCase__ : Union[str, Any] = self.get_config()

        return config, pixel_values, labels

    def _lowerCamelCase ( self : Optional[int] ):
        '''Build a ConvNextVaConfig from the stored hyperparameters.'''
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def _lowerCamelCase ( self : Any , a : Optional[Any] , a : Optional[int] , a : int ):
        '''Base model forward: last hidden state is (B, C_last, H/32, W/32).'''
        lowerCAmelCase__ : Dict = ConvNextVaModel(config=a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Dict = model(a )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def _lowerCamelCase ( self : List[Any] , a : Optional[int] , a : Any , a : Tuple ):
        '''Classification head forward: logits are (B, num_labels).'''
        lowerCAmelCase__ : str = ConvNextVaForImageClassification(a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : str = model(a , labels=a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowerCamelCase ( self : Tuple , a : Union[str, Any] , a : List[str] , a : int ):
        '''Backbone forward: feature-map shapes/channels with and without out_features.'''
        lowerCAmelCase__ : Optional[int] = ConvNextVaBackbone(config=a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Union[str, Any] = model(a )

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )

        # verify backbone works with out_features=None
        lowerCAmelCase__ : Union[str, Any] = None
        lowerCAmelCase__ : List[str] = ConvNextVaBackbone(config=a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : int = model(a )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def _lowerCamelCase ( self : Any ):
        '''Pack config and inputs for the common test mixin (no labels).'''
        lowerCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = config_and_inputs
        lowerCAmelCase__ : List[str] = {'pixel_values': pixel_values}
        return config, inputs_dict

    def _lowerCamelCase ( self : Dict ):
        '''Pack config and inputs for the common test mixin (with labels).'''
        lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = config_and_inputs
        lowerCAmelCase__ : Union[str, Any] = {'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict


@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    '''ConvNextV2 model test-suite wired into the common model/pipeline mixins.'''

    # Model classes / pipeline mapping under test (torch-only), plus
    # capability flags consumed by the shared ModelTesterMixin.
    lowercase = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    lowercase = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    def _lowerCamelCase ( self : Dict ):
        '''setUp: build the model tester and a ConfigTester for ConvNextVaConfig.'''
        lowerCAmelCase__ : List[Any] = ConvNextVaModelTester(self )
        lowerCAmelCase__ : int = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )

    def _lowerCamelCase ( self : Optional[Any] ):
        '''Run the standard config serialization/round-trip checks.'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _lowerCamelCase ( self : Optional[Any] ):
        '''Intentionally empty hook (no common-properties check for this model).'''
        return

    @unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
    def _lowerCamelCase ( self : List[str] ):
        pass

    @unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
    def _lowerCamelCase ( self : Optional[int] ):
        pass

    @unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
    def _lowerCamelCase ( self : str ):
        pass

    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Training smoke test: forward + backward on each trainable class.'''
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
            lowerCAmelCase__ : List[Any] = True

            if model_class.__name__ in [
                *get_values(a ),
                *get_values(a ),
            ]:
                continue

            lowerCAmelCase__ : Union[str, Any] = model_class(a )
            model.to(a )
            model.train()
            lowerCAmelCase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
            lowerCAmelCase__ : Union[str, Any] = model(**a ).loss
            loss.backward()

    def _lowerCamelCase ( self : Any ):
        '''Training smoke test with gradient checkpointing enabled.'''
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
            lowerCAmelCase__ : Optional[Any] = False
            lowerCAmelCase__ : List[Any] = True

            if (
                model_class.__name__ in [*get_values(a ), *get_values(a )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            lowerCAmelCase__ : Tuple = model_class(a )
            model.to(a )
            model.gradient_checkpointing_enable()
            model.train()
            lowerCAmelCase__ : int = self._prepare_for_class(a , a , return_labels=a )
            lowerCAmelCase__ : str = model(**a ).loss
            loss.backward()

    def _lowerCamelCase ( self : Optional[Any] ):
        '''forward() signature must start with `pixel_values`.'''
        lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Dict = model_class(a )
            lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : Any = [*signature.parameters.keys()]

            lowerCAmelCase__ : Union[str, Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , a )

    def _lowerCamelCase ( self : Optional[Any] ):
        '''Delegate base-model shape check to the model tester.'''
        lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )

    def _lowerCamelCase ( self : List[str] ):
        '''Hidden-state count and spatial-size checks, via kwarg and via config.'''
        def check_hidden_states_output(a : Any , a : Optional[int] , a : Tuple ):
            lowerCAmelCase__ : Optional[int] = model_class(a )
            model.to(a )
            model.eval()

            with torch.no_grad():
                lowerCAmelCase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )

            lowerCAmelCase__ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            lowerCAmelCase__ : Any = self.model_tester.num_stages
            self.assertEqual(len(a ) , expected_num_stages + 1 )

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase__ : List[str] = True
            check_hidden_states_output(a , a , a )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : str = True

            check_hidden_states_output(a , a , a )

    def _lowerCamelCase ( self : Tuple ):
        '''Delegate classification-head shape check to the model tester.'''
        lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a )

    @slow
    def _lowerCamelCase ( self : Optional[int] ):
        '''Loading the first published checkpoint must succeed.'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ : Union[str, Any] = ConvNextVaModel.from_pretrained(a )
            self.assertIsNotNone(a )


def lowerCAmelCase__ ( ) -> Optional[int]:
    # Load the standard COCO cats fixture used by the integration test.
    lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_torch
@require_vision
class A__ ( unittest.TestCase ):
    '''Integration test: pretrained ConvNextV2-tiny logits on a fixture image.'''

    @cached_property
    def _lowerCamelCase ( self : str ):
        '''Image processor for the checkpoint (None when vision deps are absent).'''
        return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None

    @slow
    def _lowerCamelCase ( self : List[str] ):
        '''Check logits shape (1, 1000) and the first three reference values.'''
        lowerCAmelCase__ : int = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(a )

        lowerCAmelCase__ : Optional[Any] = self.default_image_processor
        lowerCAmelCase__ : Union[str, Any] = prepare_img()
        lowerCAmelCase__ : Union[str, Any] = preprocessor(images=a , return_tensors='pt' ).to(a )

        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ : List[str] = model(**a )

        # verify the logits
        lowerCAmelCase__ : List[Any] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , a )

        lowerCAmelCase__ : Optional[int] = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
707
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class A__ ( __magic_name__ ): def __init__( self : int , a : List[str] , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = params lowerCAmelCase__ : Union[str, Any] = np.array(a ) lowerCAmelCase__ : List[Any] = np.array([len(a ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : str , a : List[str] ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : Optional[int] ): '''simple docstring''' return len(self.lengths ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.params.max_model_input_size lowerCAmelCase__ : Optional[int] = self.lengths > max_len logger.info(f'''Splitting {sum(a )} too long sequences.''' ) def divide_chunks(a : List[str] , a : Tuple ): return [l[i : i + n] for i in range(0 , len(a ) , a )] lowerCAmelCase__ : Union[str, Any] = [] lowerCAmelCase__ : Union[str, Any] = [] if self.params.mlm: lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: lowerCAmelCase__ , lowerCAmelCase__ : int = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: lowerCAmelCase__ : Optional[int] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: lowerCAmelCase__ : Dict = np.insert(a , 0 , a ) if sub_s[-1] != 
sep_id: lowerCAmelCase__ : Dict = np.insert(a , len(a ) , a ) assert len(a ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(a ) new_tok_ids.extend(a ) new_lengths.extend([len(a ) for l in sub_seqs] ) lowerCAmelCase__ : str = np.array(a ) lowerCAmelCase__ : Optional[Any] = np.array(a ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = len(self ) lowerCAmelCase__ : List[Any] = self.lengths > 11 lowerCAmelCase__ : Dict = self.token_ids[indices] lowerCAmelCase__ : Tuple = self.lengths[indices] lowerCAmelCase__ : Any = len(self ) logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: lowerCAmelCase__ : int = self.params.special_tok_ids['unk_token'] lowerCAmelCase__ : str = len(self ) lowerCAmelCase__ : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) lowerCAmelCase__ : int = (unk_occs / self.lengths) < 0.5 lowerCAmelCase__ : List[str] = self.token_ids[indices] lowerCAmelCase__ : Optional[Any] = self.lengths[indices] lowerCAmelCase__ : Union[str, Any] = len(self ) logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' if not self.params.is_master: return logger.info(f'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def _lowerCamelCase ( self : int , a : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : 
Optional[Any] = [t[0] for t in batch] lowerCAmelCase__ : List[str] = [t[1] for t in batch] assert len(a ) == len(a ) # Max for paddings lowerCAmelCase__ : List[str] = max(a ) # Pad token ids if self.params.mlm: lowerCAmelCase__ : str = self.params.special_tok_ids['pad_token'] else: lowerCAmelCase__ : Optional[int] = self.params.special_tok_ids['unk_token'] lowerCAmelCase__ : Tuple = [list(t.astype(a ) ) + [pad_idx] * (max_seq_len_ - len(a )) for t in token_ids] assert len(tk_ ) == len(a ) assert all(len(a ) == max_seq_len_ for t in tk_ ) lowerCAmelCase__ : Union[str, Any] = torch.tensor(tk_ ) # (bs, max_seq_len_) lowerCAmelCase__ : List[str] = torch.tensor(a ) # (bs) return tk_t, lg_t
69
0
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class A__ ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) lowerCAmelCase__ : List[Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) sd_pipe.set_scheduler('sample_euler' ) lowerCAmelCase__ : List[str] = 'A painting of a squirrel eating a burger' lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 ) lowerCAmelCase__ : Any = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) lowerCAmelCase__ : str = output.images lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ : Optional[Any] = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowerCAmelCase__ : Optional[Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) sd_pipe.set_scheduler('sample_euler' ) lowerCAmelCase__ : int = 'A painting of a squirrel eating a burger' lowerCAmelCase__ : List[str] = torch.manual_seed(0 ) lowerCAmelCase__ : List[Any] = sd_pipe([prompt] , 
generator=_SCREAMING_SNAKE_CASE , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) lowerCAmelCase__ : Optional[int] = output.images lowerCAmelCase__ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ : str = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowerCAmelCase__ : List[Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) sd_pipe.set_scheduler('sample_dpmpp_2m' ) lowerCAmelCase__ : List[Any] = 'A painting of a squirrel eating a burger' lowerCAmelCase__ : str = torch.manual_seed(0 ) lowerCAmelCase__ : Optional[int] = sd_pipe( [prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ : List[Any] = output.images lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ : Any = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
708
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, 
is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
69
0
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self : Any , a : Any , a : Tuple=13 , a : Dict=7 , a : Dict=True , a : List[str]=True , a : Optional[int]=True , a : int=True , a : Optional[int]=99 , a : List[Any]=32 , a : Any=5 , a : List[Any]=4 , a : str=37 , a : Any="gelu" , a : int=0.1 , a : Tuple=0.1 , a : Dict=512 , a : Union[str, Any]=16 , a : Dict=2 , a : Any=0.0_2 , a : Any=3 , a : Dict=4 , a : List[Any]=None , ): '''simple docstring''' lowerCAmelCase__ : List[Any] = parent lowerCAmelCase__ : str = batch_size lowerCAmelCase__ : Any = seq_length lowerCAmelCase__ : str = is_training lowerCAmelCase__ : Dict = use_input_mask lowerCAmelCase__ : int = use_token_type_ids lowerCAmelCase__ : Tuple = use_labels lowerCAmelCase__ : Dict = vocab_size lowerCAmelCase__ : int = hidden_size lowerCAmelCase__ : List[str] = num_hidden_layers lowerCAmelCase__ : Dict = num_attention_heads lowerCAmelCase__ : Optional[int] = intermediate_size lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : Dict = attention_probs_dropout_prob lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : str = type_vocab_size lowerCAmelCase__ : Dict = type_sequence_label_size lowerCAmelCase__ : str = initializer_range 
lowerCAmelCase__ : int = num_labels lowerCAmelCase__ : Tuple = num_choices lowerCAmelCase__ : str = scope def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : Optional[int] = None if self.use_input_mask: lowerCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : Any = None if self.use_token_type_ids: lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ : Dict = None lowerCAmelCase__ : List[Any] = None lowerCAmelCase__ : List[str] = None if self.use_labels: lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self : Tuple ): '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def _lowerCamelCase ( self : Any , a : Tuple , a : Union[str, Any] , a : str , a : int , a : str , a : Dict , a : str ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = NystromformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : int = 
model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowerCAmelCase__ : Union[str, Any] = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowerCAmelCase__ : List[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : Tuple , a : Union[str, Any] , a : List[str] , a : str , a : List[str] , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = NystromformerForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self : Tuple , a : Any , a : Union[str, Any] , a : List[str] , a : Dict , a : Optional[int] , a : Any , a : Any ): '''simple docstring''' lowerCAmelCase__ : Tuple = NystromformerForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : str = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self : Optional[Any] , a : Dict , a : List[str] , a : List[str] , a : List[str] , a : Dict , a : Dict , a : Any ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.num_labels lowerCAmelCase__ : Optional[Any] = NystromformerForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : int = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , 
token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self : Any , a : Optional[Any] , a : List[str] , a : List[Any] , a : Optional[Any] , a : List[str] , a : str , a : Any ): '''simple docstring''' lowerCAmelCase__ : Any = self.num_labels lowerCAmelCase__ : Dict = NystromformerForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : str = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCamelCase ( self : Union[str, Any] , a : Tuple , a : Tuple , a : int , a : List[Any] , a : Tuple , a : int , a : int ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.num_choices lowerCAmelCase__ : List[str] = NystromformerForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : str = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : List[Any] = config_and_inputs lowerCAmelCase__ : Optional[int] = {'input_ids': input_ids, 'token_type_ids': 
token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( _A , _A , unittest.TestCase ): lowercase = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) lowercase = ( { 'feature-extraction': NystromformerModel, 'fill-mask': NystromformerForMaskedLM, 'question-answering': NystromformerForQuestionAnswering, 'text-classification': NystromformerForSequenceClassification, 'token-classification': NystromformerForTokenClassification, 'zero-shot': NystromformerForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = NystromformerModelTester(self ) lowerCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase__ : int = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def _lowerCamelCase ( self : str ): '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : Union[str, Any] = NystromformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch class A__ ( unittest.TestCase ): @slow def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Any = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' ) lowerCAmelCase__ : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): lowerCAmelCase__ : Dict = model(UpperCamelCase__ )[0] lowerCAmelCase__ : str = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) lowerCAmelCase__ : Any = torch.tensor( [[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : str = 'the [MASK] of Belgium is Brussels' lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' ) lowerCAmelCase__ : Union[str, Any] = 
NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' ) lowerCAmelCase__ : Any = tokenizer(UpperCamelCase__ , return_tensors='pt' ) with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(encoding.input_ids ).logits lowerCAmelCase__ : Tuple = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , 'capital' )
709
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ = { """vocab_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""", }, """tokenizer_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""", }, } lowerCamelCase__ = { """google/rembert""": 256, } lowerCamelCase__ = """▁""" class A__ ( __magic_name__ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = RemBertTokenizer def __init__( self : Optional[Any] , a : str=None , a : Any=None , a : List[Any]=True , a : str=True , a : Dict=False , a : Dict="[CLS]" , a : int="[SEP]" , a : Tuple="<unk>" , a : Optional[Any]="[SEP]" , a : Tuple="<pad>" , a : Dict="[CLS]" , a : Optional[Any]="[MASK]" , **a : str , ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , ) lowerCAmelCase__ : int = do_lower_case lowerCAmelCase__ : int = remove_space lowerCAmelCase__ : List[Any] = keep_accents lowerCAmelCase__ : Optional[Any] = vocab_file lowerCAmelCase__ : Union[str, Any] = False if not self.vocab_file else True def _lowerCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ): '''simple 
docstring''' lowerCAmelCase__ : Dict = [self.sep_token_id] lowerCAmelCase__ : Any = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1] return [1] + ([0] * len(a )) + [1] def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ): '''simple docstring''' lowerCAmelCase__ : Tuple = [self.sep_token_id] lowerCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCamelCase ( self : Tuple , a : str , a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(a ): logger.error('Vocabulary path ({}) should be a directory'.format(a ) ) return lowerCAmelCase__ : int = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ): copyfile(self.vocab_file , a ) return (out_vocab_file,)
69
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A__ ( snake_case__ , unittest.TestCase ): lowercase = ShapEPipeline lowercase = ['''prompt'''] lowercase = ['''prompt'''] lowercase = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] lowercase = False @property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def _lowerCamelCase ( self : Any ): '''simple docstring''' return self.time_input_dim * 4 @property def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' return 8 @property def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self : str ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(_A ) @property def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ : int = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': 
self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } lowerCAmelCase__ : Optional[Any] = PriorTransformer(**_A ) return model @property def _lowerCamelCase ( self : str ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ : List[str] = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } lowerCAmelCase__ : List[Any] = ShapERenderer(**_A ) return model def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.dummy_prior lowerCAmelCase__ : Optional[int] = self.dummy_text_encoder lowerCAmelCase__ : List[Any] = self.dummy_tokenizer lowerCAmelCase__ : str = self.dummy_renderer lowerCAmelCase__ : List[Any] = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , ) lowerCAmelCase__ : Any = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def _lowerCamelCase ( self : Tuple , a : Union[str, Any] , a : Tuple=0 ): '''simple docstring''' if str(_A ).startswith('mps' ): lowerCAmelCase__ : List[Any] = torch.manual_seed(_A ) else: lowerCAmelCase__ : Dict = torch.Generator(device=_A ).manual_seed(_A ) lowerCAmelCase__ : int = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Tuple = 'cpu' lowerCAmelCase__ : Any = 
self.get_dummy_components() lowerCAmelCase__ : Tuple = self.pipeline_class(**_A ) lowerCAmelCase__ : List[str] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) lowerCAmelCase__ : Tuple = pipe(**self.get_dummy_inputs(_A ) ) lowerCAmelCase__ : int = output.images[0] lowerCAmelCase__ : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowerCAmelCase__ : Any = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self : Dict ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : List[str] = torch_device == 'cpu' lowerCAmelCase__ : Any = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_A , relax_max_difference=_A , ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Any = self.get_dummy_components() lowerCAmelCase__ : Any = self.pipeline_class(**_A ) lowerCAmelCase__ : Dict = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Dict = 2 lowerCAmelCase__ : Tuple = self.get_dummy_inputs(_A ) for key in inputs.keys(): if key in self.batch_params: lowerCAmelCase__ : Optional[int] = batch_size * [inputs[key]] lowerCAmelCase__ : Optional[int] = pipe(**_A , num_images_per_prompt=_A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A__ ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = load_numpy( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) lowerCAmelCase__ : Dict = ShapEPipeline.from_pretrained('openai/shap-e' ) lowerCAmelCase__ : int = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) lowerCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(0 ) lowerCAmelCase__ : Tuple = pipe( 'a shark' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_A , _A )
710
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class A__(AbstractDatasetInputStream):
    """Dataset input stream backed by a Python generator function.

    Wraps the packaged ``Generator`` builder so that ``read()`` yields a
    ready-to-use ``train`` split, either streaming or fully materialized.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        # NOTE(review): the obfuscated original declared every parameter as `a`
        # (a SyntaxError) and never stored the builder; names restored from the
        # upstream `datasets.io.generator` module.
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset ('train' split).

        Returns an ``IterableDataset`` when streaming, otherwise downloads,
        prepares and returns a map-style ``Dataset``.
        """
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
69
0
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch

from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPTaConfig,
    GPTaLMHeadModel,
    GPTaTokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


# (config class, model class, tokenizer class) per supported architecture.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}


def sanity_checks(args):
    """Validate CLI arguments: loss-weight consistency, file existence and
    supported student/teacher architecture combinations.

    Raises AssertionError on any inconsistent configuration.
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    """Freeze the student's positional embeddings (RoBERTa / GPT-2 students only)."""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    """Freeze the student's token-type embeddings (RoBERTa students only)."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    """Parse arguments, build teacher/student models and run knowledge distillation."""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4_000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
                    " itUse `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""")

        # SAVE PARAMS #
        logger.info(f"""Param: {args}""")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"""Special tokens {special_tok_ids}""")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"""Loading data from {args.data_file}""")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Rare tokens get boosted masking probability (XLM-style smoothing).
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"""Loading student config from {args.student_config}""")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"""cuda:{args.local_rank}""")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"""cuda:{args.local_rank}""")
    logger.info(f"""Teacher loaded from {args.teacher_name}.""")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
711
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class A__(SequenceFeatureExtractor):
    """Extract log-mel patch features ("audio_values") and a matching padding
    mask ("audio_mask") from raw mono audio, TVLT-style."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        # NOTE(review): parameter names restored from the upstream
        # TvltFeatureExtractor; the obfuscated original declared them all as `a`.
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of frequency patches per time step.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        """Compute a normalized log-mel spectrogram of shape (time, feature_size)."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, 'hann'),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='dB',
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]  # drop last frame
        log_spec = log_spec - 20.0
        # Map the dB values into (-1, 1].
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into a BatchFeature.

        `resample` / `mask_audio` are accepted for API compatibility but unused here.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
69
0
import argparse
import json

from tqdm import tqdm


def main() -> None:
    """Split raw DPR biencoder training data into an evaluation-set file
    (one question per line) and a gold-data file (tab-separated positive
    context titles per line).
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path',
        type=str,
        default='biencoder-nq-dev.json',
        help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set',
        type=str,
        help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path',
        type=str,
        help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
712
import unittest

from transformers import DonutProcessor


lowerCamelCase__ = """naver-clova-ix/donut-base"""


class A__(unittest.TestCase):
    # NOTE(review): the two methods below originally shared the name
    # `_lowerCamelCase`, so the second silently shadowed the first and
    # `self.processor` was never set; restored as setUp/test per unittest.

    def setUp(self):
        """Load the pretrained Donut processor once per test."""
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase__)

    def test_token2json(self):
        """Decoding a Donut token sequence yields the expected JSON dict."""
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
69
0
# Bootstrap cell injected at the top of auto-generated documentation notebooks:
# installs transformers + datasets, with a commented-out install-from-source variant.
lowerCamelCase__ = """\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"""
# Notebook cell list: a single code cell holding the install snippet.
# NOTE(review): `INSTALL_CONTENT` is undefined in this module — the first constant
# above was evidently renamed from it by an automated rewrite; confirm upstream.
lowerCamelCase__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions used when rendering doc templates.
lowerCamelCase__ = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
713
from numpy import exp, pi, sqrt


def lowerCAmelCase__(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the Gaussian (normal) probability density at ``x``.

    Args:
        x: point at which to evaluate the density.
        mu: mean of the distribution (default 0.0).
        sigma: standard deviation (default 1.0, must be non-zero).

    Returns:
        The density value 1/sqrt(2*pi*sigma^2) * exp(-(x-mu)^2 / (2*sigma^2)).
    """
    # NOTE(review): the original annotated the return as `int` although the
    # expression is a float; also its parameters were all named identically
    # (a SyntaxError) — names restored from usage.
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
69
0
import argparse
import json

from tqdm import tqdm


def main() -> None:
    """Parse raw DPR biencoder data into a question file and a gold-titles file.

    Writes one question per line to ``--evaluation_set`` and the tab-joined
    positive context titles per line to ``--gold_data_path``.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path',
        type=str,
        default='biencoder-nq-dev.json',
        help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set',
        type=str,
        help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path',
        type=str,
        help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            titles = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(titles) + '\n')


if __name__ == "__main__":
    main()
714
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class A__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for XLM using a tiny hand-built BPE vocabulary.

    NOTE(review): the original methods were all named `_lowerCamelCase` and
    shadowed one another; names restored per the TokenizerTesterMixin contract.
    """

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        """Return a (raw text, expected decoded text) pair for round-trip tests."""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE tokenization and id conversion on the tiny vocabulary."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special-token insertion for single sequences and pairs."""
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
69
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class A__(PipelineTool):
    """Zero-shot text-classification tool built on an NLI (entailment) model.

    NOTE(review): the three methods were all named `_lowerCamelCase` and
    shadowed one another; restored to the PipelineTool `setup`/`encode`/`decode`
    contract, and `idalabel` restored to `id2label`.
    """

    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['text', ['text']]
    outputs = ['text']

    def setup(self):
        """Resolve the entailment label index from the model config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        """Tokenize (text, hypothesis) pairs, one hypothesis per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f'''This example is {label}''' for label in labels],
            return_tensors='pt',
            padding='max_length',
        )

    def decode(self, outputs):
        """Pick the label whose entailment logit is highest."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
715
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNetaDModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    """Resize a PIL image to a multiple of 32 and convert to a [-1, 1] tensor
    of shape (1, 3, H, W)."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class A__(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    NOTE(review): the module-level helper was mangled to `lowerCAmelCase__`
    while `__call__` invokes `preprocess`; names restored from the upstream
    LDMSuperResolutionPipeline.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run super-resolution on `image` and return the upscaled output.

        Raises ValueError when `image` is neither a PIL image nor a tensor.
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}''')

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
69
0
"""TVLT audio feature extractor: log-mel patch features with padding mask."""
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class A__(SequenceFeatureExtractor):
    """Produce "audio_values" (padded log-mel features) and "audio_mask"
    (patch-level attention mask) from raw mono audio.

    NOTE(review): identifier names restored from the upstream
    TvltFeatureExtractor; the obfuscated original had duplicate `a`
    parameters and called `_np_extract_fbank_features` without defining it.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Frequency patches per time step.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a normalized log-mel spectrogram, shape (time, feature_size)."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, 'hann'),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='dB',
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        # Normalize dB values into (-1, 1].
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize a waveform or batch of waveforms into a BatchFeature.

        `resample` / `mask_audio` are accepted for API compatibility but unused here.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
716
import os from collections import deque import torch from torch.utils.data import Dataset class A__ ( __magic_name__ ): def __init__( self : Union[str, Any] , a : str="" , a : str="train" ): '''simple docstring''' assert os.path.isdir(a ) lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : Dict = os.listdir(a ) for story_filename in story_filenames_list: if "summary" in story_filename: continue lowerCAmelCase__ : Union[str, Any] = os.path.join(a , a ) if not os.path.isfile(a ): continue self.documents.append(a ) def __len__( self : Any ): '''simple docstring''' return len(self.documents ) def __getitem__( self : Dict , a : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.documents[idx] lowerCAmelCase__ : Union[str, Any] = document_path.split('/' )[-1] with open(a , encoding='utf-8' ) as source: lowerCAmelCase__ : List[Any] = source.read() lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = process_story(a ) return document_name, story_lines, summary_lines def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Tuple: lowerCAmelCase__ : Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE_ : len(SCREAMING_SNAKE_CASE_ ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) ) # for some unknown reason some lines miss a period, add it lowerCAmelCase__ : List[Any] = [_add_missing_period(SCREAMING_SNAKE_CASE_ ) for line in nonempty_lines] # gather article lines lowerCAmelCase__ : int = [] lowerCAmelCase__ : Any = deque(SCREAMING_SNAKE_CASE_ ) while True: try: lowerCAmelCase__ : int = lines.popleft() if element.startswith('@highlight' ): break story_lines.append(SCREAMING_SNAKE_CASE_ ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. 
return story_lines, [] # gather summary lines lowerCAmelCase__ : Tuple = list(filter(lambda SCREAMING_SNAKE_CASE_ : not t.startswith('@highlight' ) , SCREAMING_SNAKE_CASE_ ) ) return story_lines, summary_lines def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any: lowerCAmelCase__ : int = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')'] if line.startswith('@highlight' ): return line if line[-1] in END_TOKENS: return line return line + "." def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: if len(SCREAMING_SNAKE_CASE_ ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(SCREAMING_SNAKE_CASE_ )) ) return sequence def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: lowerCAmelCase__ : str = torch.ones_like(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : int = sequence == pad_token_id lowerCAmelCase__ : Optional[int] = 0 return mask def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: lowerCAmelCase__ : Any = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in story_lines] lowerCAmelCase__ : str = [token for sentence in story_lines_token_ids for token in sentence] lowerCAmelCase__ : Dict = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in summary_lines] lowerCAmelCase__ : str = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = [] for sequence in batch: lowerCAmelCase__ : Union[str, Any] = -1 lowerCAmelCase__ : int = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(SCREAMING_SNAKE_CASE_ ) return torch.tensor(SCREAMING_SNAKE_CASE_ )
69
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class A__ ( __magic_name__ ): lowercase = 'data2vec-text' def __init__( self : Union[str, Any] , a : Any=30_522 , a : List[str]=768 , a : Dict=12 , a : Optional[int]=12 , a : int=3_072 , a : Dict="gelu" , a : List[Any]=0.1 , a : int=0.1 , a : Any=512 , a : List[Any]=2 , a : Any=0.0_2 , a : str=1E-12 , a : Any=1 , a : Union[str, Any]=0 , a : Optional[Any]=2 , a : Optional[Any]="absolute" , a : Optional[Any]=True , a : Union[str, Any]=None , **a : Union[str, Any] , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowerCAmelCase__ : List[str] = vocab_size lowerCAmelCase__ : List[str] = hidden_size lowerCAmelCase__ : str = num_hidden_layers lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : Optional[int] = hidden_act lowerCAmelCase__ : Optional[int] = intermediate_size lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : Union[str, Any] = type_vocab_size lowerCAmelCase__ : Optional[int] = initializer_range lowerCAmelCase__ : List[str] = layer_norm_eps lowerCAmelCase__ : Any = position_embedding_type lowerCAmelCase__ : int = use_cache lowerCAmelCase__ : List[Any] = classifier_dropout class A__ ( __magic_name__ ): @property def _lowerCamelCase ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCAmelCase__ : List[str] = {0: '''batch''', 1: '''sequence'''} 
return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
717
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCamelCase__ = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. lowerCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowerCamelCase__ = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") lowerCamelCase__ = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[str]: lowerCAmelCase__ : int = None # source code of `config_class` lowerCAmelCase__ : Optional[int] = inspect.getsource(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Dict = _re_checkpoint.findall(SCREAMING_SNAKE_CASE_ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('/' ): lowerCAmelCase__ : Union[str, Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link lowerCAmelCase__ : Dict = F'''https://huggingface.co/{ckpt_name}''' if ckpt_link == ckpt_link_from_name: lowerCAmelCase__ : str = ckpt_name break return checkpoint def lowerCAmelCase__ ( ) -> int: lowerCAmelCase__ : Union[str, Any] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue lowerCAmelCase__ : Union[str, Any] = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Optional[Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: lowerCAmelCase__ : List[str] = '\n'.join(sorted(SCREAMING_SNAKE_CASE_ ) ) raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
69
0
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) set_seed(770) lowerCamelCase__ = { """c_attn""": """att_proj""", """c_proj""": """out_proj""", """c_fc""": """in_proj""", """transformer.""": """""", """h.""": """layers.""", """ln_1""": """layernorm_1""", """ln_2""": """layernorm_2""", """ln_f""": """layernorm_final""", """wpe""": """position_embeds_layer""", """wte""": """input_embeds_layer""", } lowerCamelCase__ = { """text_small""": { """repo_id""": """suno/bark""", """file_name""": """text.pt""", }, """coarse_small""": { """repo_id""": """suno/bark""", """file_name""": """coarse.pt""", }, """fine_small""": { """repo_id""": """suno/bark""", """file_name""": """fine.pt""", }, """text""": { """repo_id""": """suno/bark""", """file_name""": """text_2.pt""", }, """coarse""": { """repo_id""": """suno/bark""", """file_name""": """coarse_2.pt""", }, """fine""": { """repo_id""": """suno/bark""", """file_name""": """fine_2.pt""", }, } lowerCamelCase__ = os.path.dirname(os.path.abspath(__file__)) lowerCamelCase__ = os.path.join(os.path.expanduser("""~"""), """.cache""") lowerCamelCase__ = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""") def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False 
) -> Tuple: lowerCAmelCase__ : List[Any] = model_type if use_small: key += "_small" return os.path.join(SCREAMING_SNAKE_CASE_ , REMOTE_MODEL_PATHS[key]['file_name'] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) hf_hub_download(repo_id=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , local_dir=SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="text" ) -> int: if model_type == "text": lowerCAmelCase__ : Dict = BarkSemanticModel lowerCAmelCase__ : Optional[int] = BarkSemanticConfig lowerCAmelCase__ : List[str] = BarkSemanticGenerationConfig elif model_type == "coarse": lowerCAmelCase__ : Optional[int] = BarkCoarseModel lowerCAmelCase__ : Tuple = BarkCoarseConfig lowerCAmelCase__ : Any = BarkCoarseGenerationConfig elif model_type == "fine": lowerCAmelCase__ : Tuple = BarkFineModel lowerCAmelCase__ : List[Any] = BarkFineConfig lowerCAmelCase__ : str = BarkFineGenerationConfig else: raise NotImplementedError() lowerCAmelCase__ : Dict = F'''{model_type}_small''' if use_small else model_type lowerCAmelCase__ : Dict = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(SCREAMING_SNAKE_CASE_ ): logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' ) _download(model_info['repo_id'] , model_info['file_name'] ) lowerCAmelCase__ : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location=SCREAMING_SNAKE_CASE_ ) # this is a hack lowerCAmelCase__ : Union[str, Any] = checkpoint["model_args"] if "input_vocab_size" not in model_args: lowerCAmelCase__ : str = model_args["vocab_size"] lowerCAmelCase__ : List[Any] = model_args["vocab_size"] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments lowerCAmelCase__ : Any = model_args.pop('n_head' ) lowerCAmelCase__ : Tuple = model_args.pop('n_embd' ) lowerCAmelCase__ : 
int = model_args.pop('n_layer' ) lowerCAmelCase__ : Optional[Any] = ConfigClass(**checkpoint['model_args'] ) lowerCAmelCase__ : str = ModelClass(config=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Union[str, Any] = GenerationConfigClass() lowerCAmelCase__ : Any = model_generation_config lowerCAmelCase__ : Optional[int] = checkpoint["model"] # fixup checkpoint lowerCAmelCase__ : Optional[Any] = "_orig_mod." for k, v in list(state_dict.items() ): if k.startswith(SCREAMING_SNAKE_CASE_ ): # replace part of the key with corresponding layer name in HF implementation lowerCAmelCase__ : List[Any] = k[len(SCREAMING_SNAKE_CASE_ ) :] for old_layer_name in new_layer_name_dict: lowerCAmelCase__ : Tuple = new_k.replace(SCREAMING_SNAKE_CASE_ , new_layer_name_dict[old_layer_name] ) lowerCAmelCase__ : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() ) lowerCAmelCase__ : Optional[Any] = {k for k in extra_keys if not k.endswith('.attn.bias' )} lowerCAmelCase__ : str = set(model.state_dict().keys() ) - set(state_dict.keys() ) lowerCAmelCase__ : Optional[Any] = {k for k in missing_keys if not k.endswith('.attn.bias' )} if len(SCREAMING_SNAKE_CASE_ ) != 0: raise ValueError(F'''extra keys found: {extra_keys}''' ) if len(SCREAMING_SNAKE_CASE_ ) != 0: raise ValueError(F'''missing keys: {missing_keys}''' ) model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Optional[Any] = model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Union[str, Any] = checkpoint["best_val_loss"].item() logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(SCREAMING_SNAKE_CASE_ , 3 )} loss''' ) model.eval() model.to(SCREAMING_SNAKE_CASE_ ) del checkpoint, state_dict return model def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="text" ) -> Optional[int]: if model_type not in ("text", "coarse", 
"fine"): raise NotImplementedError() lowerCAmelCase__ : Optional[int] = "cpu" # do conversion on cpu lowerCAmelCase__ : List[Any] = _get_ckpt_path(SCREAMING_SNAKE_CASE_ , use_small=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Optional[int] = _load_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , model_type=SCREAMING_SNAKE_CASE_ , use_small=SCREAMING_SNAKE_CASE_ ) # load bark initial model lowerCAmelCase__ : Tuple = _bark_load_model(SCREAMING_SNAKE_CASE_ , 'cpu' , model_type=SCREAMING_SNAKE_CASE_ , use_small=SCREAMING_SNAKE_CASE_ ) if model_type == "text": lowerCAmelCase__ : Optional[Any] = bark_model["model"] if model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE_ ) != bark_model.get_num_params(): raise ValueError('initial and new models don\'t have the same number of parameters' ) # check if same output as the bark model lowerCAmelCase__ : List[Any] = 5 lowerCAmelCase__ : int = 10 if model_type in ["text", "coarse"]: lowerCAmelCase__ : Union[str, Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) lowerCAmelCase__ : str = bark_model(SCREAMING_SNAKE_CASE_ )[0] lowerCAmelCase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ ) # take last logits lowerCAmelCase__ : List[str] = output_new_model_total.logits[:, [-1], :] else: lowerCAmelCase__ : Dict = 3 lowerCAmelCase__ : Optional[int] = 8 lowerCAmelCase__ : Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) lowerCAmelCase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : List[str] = bark_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Union[str, Any] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('initial and new outputs don\'t have the same shape' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise 
ValueError('initial and new outputs are not equal' ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> List[str]: lowerCAmelCase__ : Any = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : int = BarkSemanticConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE_ , 'config.json' ) ) lowerCAmelCase__ : List[str] = BarkCoarseConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE_ , 'config.json' ) ) lowerCAmelCase__ : str = BarkFineConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE_ , 'config.json' ) ) lowerCAmelCase__ : Optional[int] = EncodecConfig.from_pretrained('facebook/encodec_24khz' ) lowerCAmelCase__ : str = BarkSemanticModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : int = BarkCoarseModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Dict = BarkFineModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Optional[int] = EncodecModel.from_pretrained('facebook/encodec_24khz' ) lowerCAmelCase__ : Union[str, Any] = BarkConfig.from_sub_model_configs( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : str = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) lowerCAmelCase__ : str = BarkModel(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Any = semantic lowerCAmelCase__ : Union[str, Any] = coarseAcoustic lowerCAmelCase__ : List[str] = fineAcoustic lowerCAmelCase__ : List[Any] = codec lowerCAmelCase__ : List[str] = bark_generation_config Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) bark.save_pretrained(SCREAMING_SNAKE_CASE_ , repo_id=SCREAMING_SNAKE_CASE_ , 
push_to_hub=SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""") lowerCamelCase__ = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
718
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
69
0
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class A__ ( __lowerCamelCase ): def __init__( self : List[Any] , a : str=None , a : List[Any]=None , *a : str , **a : Any ): '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config is None: assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) lowerCAmelCase__ : Any = self.model.config else: lowerCAmelCase__ : Dict = config lowerCAmelCase__ : Optional[int] = data_args lowerCAmelCase__ : Optional[int] = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not 
None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' ' padding..' ) if self.args.label_smoothing == 0: lowerCAmelCase__ : Any = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowerCAmelCase__ : List[Any] = label_smoothed_nll_loss def _lowerCamelCase ( self : Union[str, Any] , a : List[str] ): '''simple docstring''' if self.optimizer is None: lowerCAmelCase__ : Union[str, Any] = ['bias', 'LayerNorm.weight'] lowerCAmelCase__ : List[Any] = [ { 'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], 'weight_decay': self.args.weight_decay, }, { 'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] lowerCAmelCase__ : Any = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowerCAmelCase__ : Optional[Any] = Adafactor lowerCAmelCase__ : Optional[int] = {'scale_parameter': False, 'relative_step': False} else: lowerCAmelCase__ : int = AdamW lowerCAmelCase__ : Tuple = { 'betas': (self.args.adam_betaa, self.args.adam_betaa), 'eps': self.args.adam_epsilon, } lowerCAmelCase__ : Any = self.args.learning_rate if self.sharded_ddp: lowerCAmelCase__ : Tuple = OSS( params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) else: lowerCAmelCase__ : Dict = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.lr_scheduler is None: lowerCAmelCase__ : Union[str, Any] = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ ) else: # ignoring 
--lr_scheduler logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' ) def _lowerCamelCase ( self : Dict , a : str ): '''simple docstring''' lowerCAmelCase__ : Dict = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowerCAmelCase__ : int = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowerCAmelCase__ : int = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowerCAmelCase__ : str = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ ) return scheduler def _lowerCamelCase ( self : str ): '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Optional[int] ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowerCAmelCase__ : Optional[int] = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowerCAmelCase__ : Tuple = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowerCAmelCase__ , lowerCAmelCase__ : Any = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2] else: # compute label smoothed loss lowerCAmelCase__ : int = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] lowerCAmelCase__ : Dict = 
torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) lowerCAmelCase__ , lowerCAmelCase__ : int = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def _lowerCamelCase ( self : List[str] , a : str , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : List[str] = inputs.pop('labels' ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return loss def _lowerCamelCase ( self : int , a : Dict , a : Optional[Any] , a : str , a : Tuple = None , ): '''simple docstring''' lowerCAmelCase__ : str = self._prepare_inputs(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Any = { 'max_length': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, 'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowerCAmelCase__ : Dict = self.model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **SCREAMING_SNAKE_CASE_ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowerCAmelCase__ : List[str] = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['max_length'] ) lowerCAmelCase__ : Optional[Any] = inputs.pop('labels' ) with torch.no_grad(): # compute loss on predict data lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Any = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowerCAmelCase__ : Union[str, Any] = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowerCAmelCase__ : Optional[int] = 
self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['max_length'] ) return (loss, logits, labels) def _lowerCamelCase ( self : Tuple , a : Union[str, Any] , a : Tuple ): '''simple docstring''' lowerCAmelCase__ : Any = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( 'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be' f''' padded to `max_length`={max_length}''' ) lowerCAmelCase__ : Optional[Any] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowerCAmelCase__ : Any = tensor return padded_tensor
719
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure for the Chinese-CLIP model: maps submodule name -> public
# symbols. NOTE: the previous version bound this dict (and every later result) to a
# single throwaway name, so `_LazyModule` below raised NameError and the optional
# vision/torch entries were never registered.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

# Image-processing classes are only importable when vision deps (PIL) are present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

# Modeling classes require PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the module is lazy.
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
69
0
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_: int = 1_000) -> int:
    """Project Euler #1: return the sum of all multiples of 3 or 5 strictly below the limit.

    Args:
        SCREAMING_SNAKE_CASE_: exclusive upper bound (default 1000).

    Returns:
        The sum; 0 when the limit is 3 or less.
    """
    # Fix: the previous annotation ``-> Optional[Any]`` referenced names that were
    # never imported, raising NameError when the function was defined.
    return sum(e for e in range(3, SCREAMING_SNAKE_CASE_) if e % 3 == 0 or e % 5 == 0)


# Public alias: the __main__ block (and the f-string it prints) refers to
# ``solution``; without this binding the script raised NameError.
solution = lowerCAmelCase__


if __name__ == "__main__":
    print(F"""{solution() = }""")
720
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of ``n`` words (words delimited by ``character``)."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split each document of a batch into passages of 100 words; drops None texts."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages (returned as a numpy array)."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    """Build a RAG knowledge dataset from a tab-separated csv and index it with Faiss."""
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation:
    # https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
69
0
from math import pi, sqrt


def lowerCAmelCase__(num: float) -> float:
    """Recursively compute the Gamma function for positive integers and half-integers.

    Uses Gamma(n) = (n - 1) * Gamma(n - 1) with base cases Gamma(1) = 1 and
    Gamma(1/2) = sqrt(pi).

    Raises:
        ValueError: if ``num`` is not positive.
        OverflowError: if the result would overflow a float (num > 171.5).
        NotImplementedError: if ``num`` is neither an integer nor a half-integer.
    """
    if num <= 0:
        raise ValueError('math domain error')
    if num > 171.5:
        raise OverflowError('math range error')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer')
    elif num == 0.5:
        # Fix: Gamma(1/2) = sqrt(pi); the previous code returned sqrt(num) = sqrt(0.5).
        return sqrt(pi)
    else:
        # Fix: recurse through this function; the previous code called the
        # undefined name ``gamma`` and raised NameError for any num >= 2.
        return 1.0 if num == 1 else (num - 1) * lowerCAmelCase__(num - 1)


# Public alias used by the self-test and the interactive loop below.
gamma = lowerCAmelCase__


def test_gamma() -> None:
    """Sanity checks for :func:`gamma` (previously shadowed the gamma function itself)."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("""Gamma of: """))
        print(F"""gamma({num}) = {gamma(num)}""")
        print("""\nEnter 0 to exit...""")
721
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    """Unit tests for ``DDPMParallelScheduler``.

    The previous version subclassed an undefined name, stored the scheduler tuple
    under the wrong attribute, gave every test method the same name (so only the
    last survived and none were collected), and referenced locals that were never
    defined. Names below restore a consistent, runnable test class.
    """

    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via ``kwargs``."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Known variance values at the start, middle, and end of the schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_previous_timestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
69
0
import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """Map original GLPN checkpoint keys to the Hugging Face GLPN naming scheme.

    The previous version's f-strings referenced an undefined name instead of the
    extracted index, raising NameError on the first matching key.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict


def read_in_k_v(state_dict, config):
    """Split each fused key/value projection into separate key and value weights in place.

    The previous version declared two parameters with the same placeholder name
    (a SyntaxError) and never stored the split tensors under their target keys.
    """
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    """Download the standard COCO cats image used to verify the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint to the Hugging Face format and (optionally) push it.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output folder for the converted model.
        push_to_hub: whether to upload model + image processor to the Hub.
        model_name: Hub model name; also selects the expected verification values.
    """
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
700
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into one processor.

    The previous version subclassed an undefined name, lost the ``ProcessorMixin``
    class attributes, and gave every method the same obfuscated name, so internal
    calls such as ``self.get_overflowing_images`` raised AttributeError.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Back-compat: accept the deprecated `feature_extractor` kwarg as an alias
        # for `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (optionally with OCR) then the tokenizer, and merge outputs."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            # duplicate each image for every overflowing chunk produced from it
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image so there is one per overflowing tokenizer chunk."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
69
0
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self : Any , a : Optional[int] , a : List[Any]=13 , a : Union[str, Any]=7 , a : int=True , a : Any=True , a : List[str]=True , a : List[str]=True , a : Dict=True , a : Any=False , a : Optional[int]=False , a : int=False , a : int=2 , a : List[Any]=99 , a : List[str]=0 , a : Any=32 , a : Tuple=5 , a : List[Any]=4 , a : List[Any]=0.1 , a : Any=0.1 , a : Optional[Any]=512 , a : Any=2 , a : List[Any]=0.0_2 , a : Optional[int]=2 , a : Tuple=4 , a : Dict="last" , a : List[Any]=True , a : int=None , a : List[str]=0 , ): '''simple docstring''' lowerCAmelCase__ : Any = parent lowerCAmelCase__ : List[Any] = batch_size lowerCAmelCase__ : List[str] = seq_length lowerCAmelCase__ : Any = is_training lowerCAmelCase__ : str = use_input_lengths lowerCAmelCase__ : Tuple = use_token_type_ids lowerCAmelCase__ : int = use_labels lowerCAmelCase__ : List[Any] = gelu_activation lowerCAmelCase__ : List[str] = sinusoidal_embeddings lowerCAmelCase__ : int = causal lowerCAmelCase__ : Optional[int] = asm lowerCAmelCase__ : Any = n_langs lowerCAmelCase__ : Optional[int] = vocab_size lowerCAmelCase__ : int = n_special lowerCAmelCase__ : int = hidden_size lowerCAmelCase__ : List[Any] = num_hidden_layers lowerCAmelCase__ : List[Any] = num_attention_heads lowerCAmelCase__ 
: Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : Any = attention_probs_dropout_prob lowerCAmelCase__ : Dict = max_position_embeddings lowerCAmelCase__ : int = type_sequence_label_size lowerCAmelCase__ : List[str] = initializer_range lowerCAmelCase__ : Any = num_labels lowerCAmelCase__ : List[str] = num_choices lowerCAmelCase__ : Tuple = summary_type lowerCAmelCase__ : Optional[Any] = use_proj lowerCAmelCase__ : List[Any] = scope lowerCAmelCase__ : str = bos_token_id def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : Optional[Any] = None if self.use_input_lengths: lowerCAmelCase__ : Optional[Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCAmelCase__ : str = None if self.use_token_type_ids: lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCAmelCase__ : str = None lowerCAmelCase__ : Any = None lowerCAmelCase__ : List[str] = None if self.use_labels: lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , 2 ).float() lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : Any = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , 
attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _lowerCamelCase ( self : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , a : List[Any] , a : List[str] , a : Optional[int] , a : int , a : Tuple , a : Tuple , ): '''simple docstring''' lowerCAmelCase__ : List[Any] = XLMModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCAmelCase__ : str = model(__lowerCamelCase , lengths=__lowerCamelCase , langs=__lowerCamelCase ) lowerCAmelCase__ : List[str] = model(__lowerCamelCase , langs=__lowerCamelCase ) lowerCAmelCase__ : Any = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self : List[str] , a : int , a : Any , a : Dict , a : Tuple , a : Union[str, Any] , a : Optional[Any] , a : Any , a : List[Any] , a : List[str] , ): '''simple docstring''' lowerCAmelCase__ : str = XLMWithLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCAmelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self : Any , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Union[str, Any] , a : Any , a : List[Any] , a : Union[str, Any] , a : Optional[int] , a : Optional[int] , ): '''simple docstring''' lowerCAmelCase__ : Any = XLMForQuestionAnsweringSimple(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() 
lowerCAmelCase__ : Any = model(__lowerCamelCase ) lowerCAmelCase__ : Tuple = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) lowerCAmelCase__ : Optional[int] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self : Optional[Any] , a : List[Any] , a : List[Any] , a : int , a : Union[str, Any] , a : int , a : str , a : str , a : List[str] , a : str , ): '''simple docstring''' lowerCAmelCase__ : Any = XLMForQuestionAnswering(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCAmelCase__ : List[Any] = model(__lowerCamelCase ) lowerCAmelCase__ : List[str] = model( __lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , p_mask=__lowerCamelCase , ) lowerCAmelCase__ : int = model( __lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , ) (lowerCAmelCase__ ) : Optional[Any] = result_with_labels.to_tuple() lowerCAmelCase__ : Dict = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) (lowerCAmelCase__ ) : List[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def 
_lowerCamelCase ( self : Tuple , a : Union[str, Any] , a : Optional[Any] , a : Optional[int] , a : Tuple , a : int , a : Union[str, Any] , a : List[Any] , a : List[Any] , a : str , ): '''simple docstring''' lowerCAmelCase__ : List[Any] = XLMForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCAmelCase__ : Optional[Any] = model(__lowerCamelCase ) lowerCAmelCase__ : List[str] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCamelCase ( self : List[Any] , a : Optional[Any] , a : Tuple , a : Optional[Any] , a : Union[str, Any] , a : List[Any] , a : Any , a : Tuple , a : Any , a : Union[str, Any] , ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : Optional[int] = XLMForTokenClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCAmelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Optional[int] , a : Tuple , a : str , a : Tuple , a : List[Any] , a : Optional[Any] , a : Dict , a : Optional[Any] , ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.num_choices lowerCAmelCase__ : Union[str, Any] = XLMForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : Tuple = model( __lowerCamelCase , attention_mask=__lowerCamelCase , 
token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( lowerCAmelCase__ ) : str = config_and_inputs lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class A__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) lowercase = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def _lowerCamelCase ( self : Optional[int] , a : Optional[Any] , a : str , a : Any , a : Any , a : List[Any] ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _lowerCamelCase ( self : Dict , a : Optional[Any] , a : List[str] , a : Tuple=False ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowerCAmelCase__ : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) lowerCAmelCase__ : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : str = XLMModelTester(self ) lowerCAmelCase__ : Tuple = ConfigTester(self , config_class=__lowerCamelCase , emb_dim=37 ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*__lowerCamelCase ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*__lowerCamelCase ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*__lowerCamelCase ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*__lowerCamelCase ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Tuple 
= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*__lowerCamelCase ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*__lowerCamelCase ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*__lowerCamelCase ) def _lowerCamelCase ( self : Optional[int] , a : str , a : Any , a : Tuple , a : Optional[int] , a : Tuple , a : str=False , a : Optional[Any]=1 ): '''simple docstring''' self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual( [isinstance(__lowerCamelCase , __lowerCamelCase ) for iter_attentions in attentions] , [True] * len(__lowerCamelCase ) ) self.assertEqual(len(__lowerCamelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(__lowerCamelCase ): # adds PAD dummy token lowerCAmelCase__ : Tuple = min_length + idx + 1 lowerCAmelCase__ : Tuple = min_length + idx + 1 lowerCAmelCase__ : List[Any] = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__lowerCamelCase ) ) def _lowerCamelCase ( self : int , a : List[str] , a : List[str] , a : Any , a : Union[str, Any] , a : Any , a : Tuple=False , a : List[Any]=1 ): '''simple docstring''' self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual( [isinstance(__lowerCamelCase , __lowerCamelCase ) for iter_hidden_states in hidden_states] , [True] * len(__lowerCamelCase ) , ) self.assertEqual(len(__lowerCamelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(__lowerCamelCase ): # adds 
PAD dummy token lowerCAmelCase__ : int = min_length + idx + 1 lowerCAmelCase__ : Union[str, Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__lowerCamelCase ) , ) pass @slow def _lowerCamelCase ( self : Tuple ): '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : Optional[int] = XLMModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class A__ ( unittest.TestCase ): @slow def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(__lowerCamelCase ) lowerCAmelCase__ : Optional[int] = torch.tensor([[14, 447]] , dtype=torch.long , device=__lowerCamelCase ) # the president lowerCAmelCase__ : Tuple = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowerCAmelCase__ : List[Any] = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __lowerCamelCase )
701
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class A__ ( __magic_name__ ): def __init__( self : List[str] , a : Optional[Any] , a : int=13 , a : str=7 , a : Any=True , a : List[str]=True , a : Any=False , a : List[Any]=True , a : List[str]=99 , a : Optional[Any]=32 , a : List[str]=5 , a : List[Any]=4 , a : List[Any]=64 , a : List[Any]="gelu" , a : List[Any]=0.1 , a : List[Any]=0.1 , a : int=512 , a : Tuple=16 , a : List[str]=2 , a : int=0.0_2 , a : Union[str, Any]=3 , a : Any=4 , a : Union[str, Any]=None , a : Union[str, Any]=2 , a : List[str]=2 , a : int=2 , a : Dict=2 , a : List[str]=4 , a : str=1 , ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = parent lowerCAmelCase__ : int = batch_size lowerCAmelCase__ : str = seq_length lowerCAmelCase__ : Tuple = is_training lowerCAmelCase__ : List[str] = use_input_mask lowerCAmelCase__ : Optional[int] = use_token_type_ids lowerCAmelCase__ : Any = use_labels lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : str = hidden_size lowerCAmelCase__ : str = num_hidden_layers lowerCAmelCase__ : List[str] = num_attention_heads lowerCAmelCase__ : int = intermediate_size lowerCAmelCase__ : Optional[int] = hidden_act lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob lowerCAmelCase__ : Union[str, Any] = 
max_position_embeddings lowerCAmelCase__ : Optional[int] = type_vocab_size lowerCAmelCase__ : Dict = type_sequence_label_size lowerCAmelCase__ : Optional[int] = initializer_range lowerCAmelCase__ : List[Any] = num_labels lowerCAmelCase__ : Any = num_choices lowerCAmelCase__ : str = scope lowerCAmelCase__ : Any = q_groups lowerCAmelCase__ : Any = k_groups lowerCAmelCase__ : Union[str, Any] = v_groups lowerCAmelCase__ : int = post_attention_groups lowerCAmelCase__ : str = intermediate_groups lowerCAmelCase__ : Union[str, Any] = output_groups def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : Tuple = None if self.use_input_mask: lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : List[Any] = None lowerCAmelCase__ : List[str] = None lowerCAmelCase__ : Tuple = None if self.use_labels: lowerCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self : str ): '''simple docstring''' return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , 
post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : List[str] , a : Any , a : Optional[int] , a : str , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = SqueezeBertModel(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : List[str] = model(a , a ) lowerCAmelCase__ : Any = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : int , a : Union[str, Any] , a : Tuple , a : Any ): '''simple docstring''' lowerCAmelCase__ : List[str] = SqueezeBertForMaskedLM(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Any = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : str , a : Optional[Any] , a : str , a : int ): '''simple docstring''' lowerCAmelCase__ : Any = SqueezeBertForQuestionAnswering(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : List[str] = model( a , attention_mask=a , start_positions=a , end_positions=a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self : Tuple , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : str , a : str , a : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.num_labels lowerCAmelCase__ : Dict = SqueezeBertForSequenceClassification(a ) model.to(a ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self : Any , a : int , a : 
Any , a : Dict , a : Any , a : Tuple , a : Tuple ): '''simple docstring''' lowerCAmelCase__ : str = self.num_labels lowerCAmelCase__ : Dict = SqueezeBertForTokenClassification(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCamelCase ( self : str , a : Optional[int] , a : List[Any] , a : int , a : List[Any] , a : Union[str, Any] , a : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.num_choices lowerCAmelCase__ : Union[str, Any] = SqueezeBertForMultipleChoice(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : List[str] = model( a , attention_mask=a , labels=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.prepare_config_and_inputs() ((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) : List[Any] = config_and_inputs lowerCAmelCase__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) lowercase = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 
'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = True lowercase = False def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = SqueezeBertModelTester(self ) lowerCAmelCase__ : Dict = ConfigTester(self , config_class=a , dim=37 ) def _lowerCamelCase ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*a ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*a ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*a ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*a ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a ) @slow def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : Optional[int] = 
SqueezeBertModel.from_pretrained(a ) self.assertIsNotNone(a ) @require_sentencepiece @require_tokenizers @require_torch class A__ ( unittest.TestCase ): @slow def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : int = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' ) lowerCAmelCase__ : str = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] ) lowerCAmelCase__ : Any = model(a )[0] lowerCAmelCase__ : Tuple = torch.Size((1, 3) ) self.assertEqual(output.shape , a ) lowerCAmelCase__ : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] ) self.assertTrue(torch.allclose(a , a , atol=1E-4 ) )
69
0
class A__ : def __init__( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : str = {} # Mapping from char to TrieNode lowerCAmelCase__ : List[Any] = False def _lowerCamelCase ( self : List[str] , a : list[str] ): '''simple docstring''' for word in words: self.insert(_lowercase ) def _lowerCamelCase ( self : str , a : str ): '''simple docstring''' lowerCAmelCase__ : int = self for char in word: if char not in curr.nodes: lowerCAmelCase__ : Union[str, Any] = TrieNode() lowerCAmelCase__ : str = curr.nodes[char] lowerCAmelCase__ : Tuple = True def _lowerCamelCase ( self : Union[str, Any] , a : str ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self for char in word: if char not in curr.nodes: return False lowerCAmelCase__ : str = curr.nodes[char] return curr.is_leaf def _lowerCamelCase ( self : Optional[int] , a : str ): '''simple docstring''' def _delete(a : TrieNode , a : str , a : int ) -> bool: if index == len(_lowercase ): # If word does not exist if not curr.is_leaf: return False lowerCAmelCase__ : int = False return len(curr.nodes ) == 0 lowerCAmelCase__ : int = word[index] lowerCAmelCase__ : Any = curr.nodes.get(_lowercase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted lowerCAmelCase__ : int = _delete(_lowercase , _lowercase , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , _lowercase , 0 ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: if node.is_leaf: print(UpperCamelCase__ , end=' ' ) for key, value in node.nodes.items(): print_words(UpperCamelCase__ , word + key ) def lowerCAmelCase__ ( ) -> bool: lowerCAmelCase__ : Any = 'banana bananas bandana band apple all beast'.split() lowerCAmelCase__ : str = TrieNode() root.insert_many(UpperCamelCase__ ) # print_words(root, "") assert all(root.find(UpperCamelCase__ ) for word in words ) assert root.find('banana' ) assert not 
root.find('bandanas' ) assert not root.find('apps' ) assert root.find('apple' ) assert root.find('all' ) root.delete('all' ) assert not root.find('all' ) root.delete('banana' ) assert not root.find('banana' ) assert root.find('bananas' ) return True def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: print(str(UpperCamelCase__ ) , 'works!' if passes else 'doesn\'t work :(' ) def lowerCAmelCase__ ( ) -> None: assert test_trie() def lowerCAmelCase__ ( ) -> None: print_results('Testing trie functionality' , test_trie() ) if __name__ == "__main__": main()
702
__author__ = "Alexander Joslin"

import operator as op


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised infix *equation* using Dijkstra's
    two-stack algorithm.

    Supported operators: ``+ - * /`` (``/`` is true division, so the
    result is a float unless the operands divide evenly).

    :param equation: expression in which every operation is wrapped in
        parentheses and every operand is a single digit,
        e.g. ``"(2 + (3 * 4))"``.
    :return: the value of the expression.

    >>> dijkstras_two_stack_algorithm("(5 + ((4 * 2) * (2 + 3)))")
    45
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    # Plain lists act as the two stacks (append == push, pop == pop);
    # no custom Stack class is needed.
    operand_stack: list[int] = []
    operator_stack: list[str] = []

    for i in equation:
        if i.isdigit():
            # RULE 1: operand -> push onto the operand stack.
            operand_stack.append(int(i))
        elif i in operators:
            # RULE 2: operator -> push onto the operator stack.
            operator_stack.append(i)
        elif i == ")":
            # RULE 4: closing parenthesis -> pop one operator and two
            # operands, apply, push the result back.  The first popped
            # operand is the RIGHT-hand side (matters for - and /).
            opr = operator_stack.pop()
            right = operand_stack.pop()
            left = operand_stack.pop()
            total = operators[opr](left, right)
            operand_stack.append(total)

    # RULE 5: the lone remaining operand is the answer.
    return operand_stack[-1]


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
69
0
from __future__ import annotations def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str: lowerCAmelCase__ : List[Any] = 0.00 lowerCAmelCase__ : int = 0 for resistor in resistors: if resistor <= 0: lowerCAmelCase__ : Dict = F'''Resistor at index {index} has a negative or zero value!''' raise ValueError(__SCREAMING_SNAKE_CASE ) first_sum += 1 / float(__SCREAMING_SNAKE_CASE ) index += 1 return 1 / first_sum def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int: lowerCAmelCase__ : Union[str, Any] = 0.00 lowerCAmelCase__ : List[Any] = 0 for resistor in resistors: sum_r += resistor if resistor < 0: lowerCAmelCase__ : str = F'''Resistor at index {index} has a negative value!''' raise ValueError(__SCREAMING_SNAKE_CASE ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
703
import numpy


class TwoHiddenLayerNeuralNetwork:
    """Feed-forward neural network with two hidden layers (4 and 3 nodes)
    trained with plain gradient descent / backpropagation and sigmoid
    activations throughout."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """
        input_array  : training inputs, shape (samples, features).
        output_array : target outputs, shape (samples, 1).
        """
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # First hidden layer has 4 nodes, second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Second hidden layer has 3 nodes, output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values; initially all zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the inputs through the network and return the output
        layer activations (also cached on self for backpropagation)."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update all three weight matrices by one gradient step on the
        squared error between output_array and predicted_output."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run *iterations* rounds of feedforward + backpropagation.

        When give_loss is True, the mean squared error against *output* is
        printed after every iteration.
        """
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Classify *input_arr*: returns 1 when the network output exceeds
        the 0.6 threshold, else 0."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Elementwise logistic sigmoid."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid expressed in terms of its output value."""
    return (value) * (1 - (value))


def example() -> int:
    """Train the network on the 3-input odd-parity truth table and predict
    the class of (1, 1, 1)."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
69
0
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Optimize *search_prob* with simulated annealing.

    Starting from *search_prob*, repeatedly move to a random neighbor that
    either improves the score or is accepted with probability
    e^(change / temperature); the temperature decays geometrically by
    *rate_of_decrease* each round. The search stops when the temperature
    drops below *threshold_temp* or no acceptable neighbor exists, and the
    best state seen is returned. Set find_max=False to minimize instead.
    Neighbors outside [min_x, max_x] x [min_y, max_y] are skipped.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
704
# NOTE(review): this chunk is an identifier-mangled Hugging Face test file for
# the Swin-v2 vision transformer. Local bindings were rewritten to a single
# name (`lowerCAmelCase__`), method names to `_lowerCamelCase`, and class names
# to `A__`, while the *uses* still reference the original names
# (`self.model_tester`, `config`, `pixel_values`, ...). Several statements
# (annotated tuple assignments like `a , b : T = v`) do not even parse.
# Code below is kept token-identical to the original; only comments are added.
import collections
import inspect
import unittest

from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
    from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# Model tester: builds tiny Swin-v2 configs/inputs and runs shape checks.
# NOTE(review): all parameters were mangled to `a` (duplicate parameter names
# are a SyntaxError) and the attribute assignments lost their `self.` targets.
class A__ :
    def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ):
        '''simple docstring'''
        lowerCAmelCase__ : str = parent
        lowerCAmelCase__ : Union[str, Any] = batch_size
        lowerCAmelCase__ : List[str] = image_size
        lowerCAmelCase__ : Optional[Any] = patch_size
        lowerCAmelCase__ : Tuple = num_channels
        lowerCAmelCase__ : Optional[int] = embed_dim
        lowerCAmelCase__ : Tuple = depths
        lowerCAmelCase__ : List[str] = num_heads
        lowerCAmelCase__ : List[Any] = window_size
        lowerCAmelCase__ : Any = mlp_ratio
        lowerCAmelCase__ : Optional[Any] = qkv_bias
        lowerCAmelCase__ : Any = hidden_dropout_prob
        lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
        lowerCAmelCase__ : int = drop_path_rate
        lowerCAmelCase__ : Optional[Any] = hidden_act
        lowerCAmelCase__ : int = use_absolute_embeddings
        lowerCAmelCase__ : List[str] = patch_norm
        lowerCAmelCase__ : Optional[int] = layer_norm_eps
        lowerCAmelCase__ : List[str] = initializer_range
        lowerCAmelCase__ : Optional[Any] = is_training
        lowerCAmelCase__ : List[Any] = scope
        lowerCAmelCase__ : Dict = use_labels
        lowerCAmelCase__ : List[Any] = type_sequence_label_size
        lowerCAmelCase__ : Optional[Any] = encoder_stride

    # prepare_config_and_inputs: random pixel_values (+ labels when use_labels).
    def _lowerCamelCase ( self : int ):
        '''simple docstring'''
        lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCAmelCase__ : int = self.get_config()
        return config, pixel_values, labels

    # get_config: builds a SwinvaConfig from the tester attributes.
    # NOTE(review): `path_norm=` looks like a typo for `patch_norm=` — confirm
    # against the SwinvaConfig signature before relying on it.
    def _lowerCamelCase ( self : List[str] ):
        '''simple docstring'''
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    # create_and_check_model: base model output shape check.
    def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ):
        '''simple docstring'''
        lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Optional[int] = model(a )
        # Sequence shrinks 4x per stage; hidden size doubles per stage.
        lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    # create_and_check_for_masked_image_modeling, incl. a greyscale pass.
    def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ):
        '''simple docstring'''
        lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : str = model(a )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        lowerCAmelCase__ : Any = 1
        lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCAmelCase__ : List[str] = model(a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    # create_and_check_for_image_classification: logits shape check.
    def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ):
        '''simple docstring'''
        lowerCAmelCase__ : str = self.type_sequence_label_size
        lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Union[str, Any] = model(a , labels=a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # prepare_config_and_inputs_for_common: (config, {"pixel_values": ...}).
    def _lowerCamelCase ( self : int ):
        '''simple docstring'''
        lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs
        lowerCAmelCase__ : str = {'pixel_values': pixel_values}
        return config, inputs_dict


# Main model test suite (mixes in ModelTesterMixin/PipelineTesterMixin —
# mangled here to `__magic_name__`).
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    lowercase = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    lowercase = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    # setUp
    def _lowerCamelCase ( self : Tuple ):
        '''simple docstring'''
        lowerCAmelCase__ : Optional[Any] = SwinvaModelTester(self )
        lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 )

    # test_config
    def _lowerCamelCase ( self : List[Any] ):
        '''simple docstring'''
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    # test_model
    def _lowerCamelCase ( self : List[str] ):
        '''simple docstring'''
        lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )

    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def _lowerCamelCase ( self : Any ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def _lowerCamelCase ( self : Dict ):
        '''simple docstring'''
        pass

    # test_model_common_attributes: input/output embedding accessors.
    def _lowerCamelCase ( self : Tuple ):
        '''simple docstring'''
        lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase__ : int = model_class(a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCAmelCase__ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a , nn.Linear ) )

    # test_forward_signature: first forward arg must be pixel_values.
    def _lowerCamelCase ( self : Optional[int] ):
        '''simple docstring'''
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Optional[int] = model_class(a )
            lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCAmelCase__ : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1] , a )

    # test_attention_outputs: attentions count/shape with and without config flag.
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Optional[int] = True

        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Tuple = True
            lowerCAmelCase__ : str = False
            lowerCAmelCase__ : List[Any] = True
            lowerCAmelCase__ : Dict = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) )
            lowerCAmelCase__ : Dict = outputs.attentions
            lowerCAmelCase__ : Dict = len(self.model_tester.depths )
            self.assertEqual(len(a ) , a )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCAmelCase__ : List[str] = True
            lowerCAmelCase__ : Optional[int] = config.window_size**2
            lowerCAmelCase__ : str = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
            lowerCAmelCase__ : Optional[Any] = outputs.attentions
            self.assertEqual(len(a ) , a )

            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            lowerCAmelCase__ : Tuple = len(a )

            # Check attention is always last and order is fine
            lowerCAmelCase__ : str = True
            lowerCAmelCase__ : Union[str, Any] = True
            lowerCAmelCase__ : str = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) )

            if hasattr(self.model_tester , 'num_hidden_states_types' ):
                lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                lowerCAmelCase__ : Any = 2
            self.assertEqual(out_len + added_hidden_states , len(a ) )

            lowerCAmelCase__ : Dict = outputs.attentions
            self.assertEqual(len(a ) , a )

            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )

    # check_hidden_states_output: hidden_states and reshaped_hidden_states shapes.
    def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ):
        '''simple docstring'''
        lowerCAmelCase__ : int = model_class(a )
        model.to(a )
        model.eval()

        with torch.no_grad():
            lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )

        lowerCAmelCase__ : Optional[Any] = outputs.hidden_states
        lowerCAmelCase__ : str = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(a ) , a )

        # Swinv2 has a different seq_length
        lowerCAmelCase__ : int = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

        lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(a ) , a )

        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape
        lowerCAmelCase__ : List[str] = (
            reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    # test_hidden_states_output
    def _lowerCamelCase ( self : Optional[Any] ):
        '''simple docstring'''
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : str = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Any = True
            self.check_hidden_states_output(a , a , a , a )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : List[str] = True
            self.check_hidden_states_output(a , a , a , a )

    # test_hidden_states_output_with_padding: non-divisible image size.
    def _lowerCamelCase ( self : List[str] ):
        '''simple docstring'''
        lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Any = 3
        lowerCAmelCase__ : int = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCAmelCase__ : str = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            lowerCAmelCase__ : str = True
            self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : Any = True
            self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )

    # test_for_masked_image_modeling
    def _lowerCamelCase ( self : Dict ):
        '''simple docstring'''
        lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*a )

    # test_for_image_classification
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a )

    # test_model_from_pretrained
    @slow
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a )
            self.assertIsNotNone(a )

    # test_initialization: zero-init config => params mean must be 0.0 or 1.0.
    def _lowerCamelCase ( self : Any ):
        '''simple docstring'''
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Optional[int] = _config_zero_init(a )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : int = model_class(config=a )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )


# Slow integration test against the released tiny checkpoint.
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
    @cached_property
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
            if is_vision_available()
            else None
        )

    @slow
    def _lowerCamelCase ( self : Dict ):
        '''simple docstring'''
        lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            a )
        lowerCAmelCase__ : Dict = self.default_image_processor
        lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a )

        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ : Union[str, Any] = model(**a )

        # verify the logits
        lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , a )
        lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
0
import unittest from transformers import DonutProcessor lowerCamelCase__ = 'naver-clova-ix/donut-base' class A__ ( unittest.TestCase ): def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = DonutProcessor.from_pretrained(a ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Dict = { "name": "John Doe", "age": "99", "city": "Atlanta", "state": "GA", "zip": "30301", "phone": "123-4567", "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}], } lowerCAmelCase__ : Optional[int] = ( "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>" "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>" "<s_nicknames><s_nickname>Johnny</s_nickname>" "<sep/><s_nickname>JD</s_nickname></s_nicknames>" ) lowerCAmelCase__ : Union[str, Any] = self.processor.tokenajson(a ) self.assertDictEqual(a , a )
705
from itertools import permutations def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool: if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False lowerCAmelCase__ : str = [7, 11, 13, 17] for i, test in enumerate(SCREAMING_SNAKE_CASE_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 10 ) -> int: return sum( int(''.join(map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) for num in permutations(range(SCREAMING_SNAKE_CASE_ ) ) if is_substring_divisible(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
69
0
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any: lowerCAmelCase__ : int = len(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : List[Any] = sum(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): lowerCAmelCase__ : List[str] = True for i in range(1 , s + 1 ): lowerCAmelCase__ : str = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): lowerCAmelCase__ : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: lowerCAmelCase__ : List[Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: lowerCAmelCase__ : str = s - 2 * j break return diff
706
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

# NOTE(review): this file's identifiers are machine-obfuscated (digits mangled
# to 'a', assignment targets collapsed to `lowerCAmelCase__`), so many names
# below (`unet`, `scheduler`, `generator`, `pipe`, `image`, ...) are read but
# never visibly bound, `__magic_name__` is undefined, and several signatures
# repeat the parameter name `a`. Code is kept byte-identical; only comments
# and docstrings are added.

enable_full_determinism()


class A__(__magic_name__, unittest.TestCase):
    """Fast (CPU, dummy-weights) tests for ConsistencyModelPipeline."""

    # NOTE(review): all four class attributes share the name `lowercase`, so
    # only the last assignment survives — presumably these were distinct
    # attributes (pipeline_class, params, batch_params, ...) before obfuscation.
    lowercase = ConsistencyModelPipeline
    lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    lowercase = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ]
    )

    @property
    def _lowerCamelCase(self: int):
        """Unconditional dummy UNet loaded from the test checkpoint."""
        lowerCAmelCase__: Dict = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet',
        )
        return unet

    @property
    def _lowerCamelCase(self: List[str]):
        """Class-conditional dummy UNet loaded from the test checkpoint."""
        lowerCAmelCase__: Union[str, Any] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet_class_cond',
        )
        return unet

    def _lowerCamelCase(self: Optional[Any], a: Union[str, Any]=False):
        """Build the pipeline component dict (unet + multistep scheduler)."""
        if class_cond:
            lowerCAmelCase__: Tuple = self.dummy_cond_unet
        else:
            lowerCAmelCase__: Dict = self.dummy_uncond_unet
        # Default to CM multistep sampler
        lowerCAmelCase__: Optional[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0,
        )
        lowerCAmelCase__: List[Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def _lowerCamelCase(self: int, a: Optional[int], a: Any=0):
        """Standard call kwargs for the pipeline; seeds a device generator."""
        # mps does not support device-local generators, hence the branch.
        if str(a).startswith('mps'):
            lowerCAmelCase__: List[str] = torch.manual_seed(a)
        else:
            lowerCAmelCase__: str = torch.Generator(device=a).manual_seed(a)
        lowerCAmelCase__: str = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs

    def _lowerCamelCase(self: Optional[Any]):
        """Multistep unconditional sampling matches a pinned output slice."""
        lowerCAmelCase__: Any = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__: Optional[Any] = self.get_dummy_components()
        lowerCAmelCase__: List[Any] = ConsistencyModelPipeline(**a)
        lowerCAmelCase__: Tuple = pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        lowerCAmelCase__: str = self.get_dummy_inputs(a)
        lowerCAmelCase__: str = pipe(**a).images
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__: str = image[0, -3:, -3:, -1]
        lowerCAmelCase__: Tuple = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def _lowerCamelCase(self: List[str]):
        """Multistep class-conditional sampling matches a pinned slice."""
        lowerCAmelCase__: int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__: Tuple = self.get_dummy_components(class_cond=a)
        lowerCAmelCase__: Union[str, Any] = ConsistencyModelPipeline(**a)
        lowerCAmelCase__: Tuple = pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        lowerCAmelCase__: List[Any] = self.get_dummy_inputs(a)
        lowerCAmelCase__: int = 0  # class label fed to the conditional UNet
        lowerCAmelCase__: Union[str, Any] = pipe(**a).images
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__: Tuple = image[0, -3:, -3:, -1]
        lowerCAmelCase__: str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def _lowerCamelCase(self: Any):
        """One-step (num_inference_steps=1) unconditional sampling."""
        lowerCAmelCase__: Tuple = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__: Union[str, Any] = self.get_dummy_components()
        lowerCAmelCase__: Tuple = ConsistencyModelPipeline(**a)
        lowerCAmelCase__: Dict = pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        lowerCAmelCase__: Union[str, Any] = self.get_dummy_inputs(a)
        lowerCAmelCase__: Optional[Any] = 1
        lowerCAmelCase__: Dict = None  # use scheduler defaults instead of explicit timesteps
        lowerCAmelCase__: List[Any] = pipe(**a).images
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__: Tuple = image[0, -3:, -3:, -1]
        lowerCAmelCase__: Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def _lowerCamelCase(self: Optional[Any]):
        """One-step class-conditional sampling."""
        lowerCAmelCase__: Dict = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__: Optional[int] = self.get_dummy_components(class_cond=a)
        lowerCAmelCase__: List[Any] = ConsistencyModelPipeline(**a)
        lowerCAmelCase__: Optional[Any] = pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        lowerCAmelCase__: Tuple = self.get_dummy_inputs(a)
        lowerCAmelCase__: Dict = 1
        lowerCAmelCase__: Tuple = None  # use scheduler defaults instead of explicit timesteps
        lowerCAmelCase__: Optional[Any] = 0  # class label
        lowerCAmelCase__: str = pipe(**a).images
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__: Union[str, Any] = image[0, -3:, -3:, -1]
        lowerCAmelCase__: Dict = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3


@slow
@require_torch_gpu
class A__(unittest.TestCase):
    """Slow GPU tests against the real `diffusers/consistency_models` weights."""

    def _lowerCamelCase(self: Optional[Any]):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCamelCase(self: Optional[Any], a: Tuple=0, a: Optional[Any]=False, a: Optional[Any]="cpu", a: Union[str, Any]=torch.floataa, a: Dict=(1, 3, 64, 64)):
        """Call kwargs for the real pipeline; optionally pins fixed latents."""
        lowerCAmelCase__: Union[str, Any] = torch.manual_seed(a)
        lowerCAmelCase__: List[Any] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            lowerCAmelCase__: Optional[int] = self.get_fixed_latents(seed=a, device=a, dtype=a, shape=a)
            lowerCAmelCase__: Tuple = latents
        return inputs

    def _lowerCamelCase(self: str, a: Tuple=0, a: Tuple="cpu", a: Tuple=torch.floataa, a: str=(1, 3, 64, 64)):
        """Deterministic latents for a given seed/device/dtype/shape."""
        if type(a) == str:
            lowerCAmelCase__: str = torch.device(a)
        lowerCAmelCase__: List[str] = torch.Generator(device=a).manual_seed(a)
        lowerCAmelCase__: Any = randn_tensor(a, generator=a, device=a, dtype=a)
        return latents

    def _lowerCamelCase(self: str):
        """Multistep sampling with the cd_imagenet64_l2 checkpoint."""
        lowerCAmelCase__: int = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        lowerCAmelCase__: List[str] = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0,
        )
        lowerCAmelCase__: List[Any] = ConsistencyModelPipeline(unet=a, scheduler=a)
        pipe.to(torch_device=a)
        pipe.set_progress_bar_config(disable=a)
        lowerCAmelCase__: Optional[Any] = self.get_inputs()
        lowerCAmelCase__: Dict = pipe(**a).images
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__: List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase__: Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2

    def _lowerCamelCase(self: str):
        """One-step sampling with the cd_imagenet64_l2 checkpoint."""
        lowerCAmelCase__: Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        lowerCAmelCase__: Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0,
        )
        lowerCAmelCase__: Optional[int] = ConsistencyModelPipeline(unet=a, scheduler=a)
        pipe.to(torch_device=a)
        pipe.set_progress_bar_config(disable=a)
        lowerCAmelCase__: List[str] = self.get_inputs()
        lowerCAmelCase__: Union[str, Any] = 1
        lowerCAmelCase__: List[str] = None  # use scheduler defaults instead of explicit timesteps
        lowerCAmelCase__: List[str] = pipe(**a).images
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__: Optional[int] = image[0, -3:, -3:, -1]
        lowerCAmelCase__: Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2

    @require_torch_a
    def _lowerCamelCase(self: List[str]):
        """Multistep fp16 sampling under the torch 2.0 SDP flash-attention kernel."""
        lowerCAmelCase__: int = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        lowerCAmelCase__: List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0,
        )
        lowerCAmelCase__: Tuple = ConsistencyModelPipeline(unet=a, scheduler=a)
        pipe.to(torch_device=a, torch_dtype=torch.floataa)
        pipe.set_progress_bar_config(disable=a)
        lowerCAmelCase__: str = self.get_inputs(get_fixed_latents=a, device=a)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a, enable_math=a, enable_mem_efficient=a):
            lowerCAmelCase__: Dict = pipe(**a).images
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__: str = image[0, -3:, -3:, -1]
        lowerCAmelCase__: str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    @require_torch_a
    def _lowerCamelCase(self: Optional[int]):
        """One-step fp16 sampling under the torch 2.0 SDP flash-attention kernel."""
        lowerCAmelCase__: Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        lowerCAmelCase__: List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=8_0.0,
        )
        lowerCAmelCase__: Dict = ConsistencyModelPipeline(unet=a, scheduler=a)
        pipe.to(torch_device=a, torch_dtype=torch.floataa)
        pipe.set_progress_bar_config(disable=a)
        lowerCAmelCase__: Any = self.get_inputs(get_fixed_latents=a, device=a)
        lowerCAmelCase__: List[str] = 1
        lowerCAmelCase__: str = None  # use scheduler defaults instead of explicit timesteps
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a, enable_math=a, enable_mem_efficient=a):
            lowerCAmelCase__: List[str] = pipe(**a).images
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__: Dict = image[0, -3:, -3:, -1]
        lowerCAmelCase__: Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
69
0
from __future__ import annotations

from collections import deque


class A__:
    """Aho-Corasick keyword automaton: builds a trie over keywords, wires
    failure links breadth-first, then scans a string for all occurrences.

    NOTE(review): assignment targets in this file are obfuscated to
    `lowerCAmelCase__`, so names read below (`self.adlist`, `keywords`,
    `current_state`, `next_state`, `q`, `r`, `state`, `result`, `string`,
    ...) are never visibly bound, and several signatures repeat the
    parameter name `a`. Code is kept byte-identical; comments only.
    """

    def __init__(self: Any, a: List[str]):
        """Build the automaton: root node, one trie path per keyword, fail links."""
        # Node 0 is the root; each node is a dict with value/next_states/fail_state/output.
        lowerCAmelCase__: list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []}
        )
        for keyword in keywords:
            self.add_keyword(a)
        self.set_fail_transitions()

    def _lowerCamelCase(self: Dict, a: Optional[Any], a: List[str]):
        """Return the child of `current_state` labeled `char`, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def _lowerCamelCase(self: List[str], a: Dict):
        """Insert one keyword into the trie, creating nodes as needed."""
        lowerCAmelCase__: Any = 0
        for character in keyword:
            lowerCAmelCase__: Optional[Any] = self.find_next_state(a, a)
            if next_state is None:
                # No edge for this character yet: append a fresh node.
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                lowerCAmelCase__: Tuple = len(self.adlist) - 1
            else:
                lowerCAmelCase__: Optional[Any] = next_state
        # The terminal node emits the whole keyword.
        self.adlist[current_state]["output"].append(a)

    def _lowerCamelCase(self: List[str]):
        """BFS from the root assigning each node its failure transition and
        merging the fail target's output list into the node's own."""
        lowerCAmelCase__: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(a)
            # Depth-1 nodes always fail back to the root.
            lowerCAmelCase__: Optional[int] = 0
        while q:
            lowerCAmelCase__: Optional[int] = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(a)
                # Walk fail links until a state with a matching edge (or root).
                lowerCAmelCase__: Any = self.adlist[r]["""fail_state"""]
                while (
                    self.find_next_state(a, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    lowerCAmelCase__: List[Any] = self.adlist[state]["""fail_state"""]
                lowerCAmelCase__: List[str] = self.find_next_state(
                    a, self.adlist[child]['value']
                )
                if self.adlist[child]["fail_state"] is None:
                    lowerCAmelCase__: Dict = 0
                # Inherit outputs reachable through the failure chain.
                lowerCAmelCase__: List[str] = (
                    self.adlist[child]["""output"""]
                    + self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
                )

    def _lowerCamelCase(self: Optional[int], a: List[Any]):
        """Scan a string and collect start indices of every keyword hit."""
        lowerCAmelCase__: dict = {}  # returns a dict with keywords and list of its occurrences
        lowerCAmelCase__: str = 0
        for i in range(len(a)):
            # Follow fail links until the current character can be consumed.
            while (
                self.find_next_state(a, string[i]) is None
                and current_state != 0
            ):
                lowerCAmelCase__: Optional[Any] = self.adlist[current_state]["""fail_state"""]
            lowerCAmelCase__: Union[str, Any] = self.find_next_state(a, string[i])
            if next_state is None:
                lowerCAmelCase__: List[Any] = 0
            else:
                lowerCAmelCase__: List[str] = next_state
            # Record every keyword emitted by the state we landed on.
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    lowerCAmelCase__: Union[str, Any] = []
                result[key].append(i - len(a) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
707
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


# NOTE(review): identifiers in this file are machine-obfuscated — assignment
# targets collapsed to `lowerCAmelCase__`, the base class to `__magic_name__`,
# and several signatures repeat the parameter name `a`. Names read below
# (`params`, `data`, `self.token_ids`, `self.lengths`, `max_len`, ...) are
# never visibly bound. Code is kept byte-identical; comments only.
class A__(__magic_name__):
    """Dataset of tokenized sequences for LM distillation: wraps token-id
    arrays plus their lengths and cleans them (split long, drop short,
    drop unknown-heavy) before training."""

    def __init__(self: int, a: List[str], a: List[str]):
        """Store params and data, then run the cleaning passes and sanity checks."""
        lowerCAmelCase__: List[Any] = params
        lowerCAmelCase__: Union[str, Any] = np.array(a)
        # Cache per-sequence lengths so filtering can be vectorized.
        lowerCAmelCase__: List[Any] = np.array([len(a) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self: str, a: List[str]):
        """Return the (token_ids, length) pair at the given index."""
        return (self.token_ids[index], self.lengths[index])

    def __len__(self: Optional[int]):
        """Number of sequences currently held."""
        return len(self.lengths)

    def _lowerCamelCase(self: Tuple):
        """Invariant check: one cached length per sequence, each accurate."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def _lowerCamelCase(self: Optional[int]):
        """Split sequences longer than the model's max input size into
        chunks, re-adding the special start/end tokens on each chunk."""
        lowerCAmelCase__: Union[str, Any] = self.params.max_model_input_size
        lowerCAmelCase__: Optional[int] = self.lengths > max_len
        logger.info(f'''Splitting {sum(a )} too long sequences.''')

        def divide_chunks(a: List[str], a: Tuple):
            # Slice a sequence into consecutive chunks of size n.
            return [l[i : i + n] for i in range(0, len(a), a)]

        lowerCAmelCase__: Union[str, Any] = []
        lowerCAmelCase__: Union[str, Any] = []
        # MLM-style data is framed by [CLS]/[SEP]; CLM-style by BOS/EOS.
        if self.params.mlm:
            lowerCAmelCase__, lowerCAmelCase__: Dict = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            lowerCAmelCase__, lowerCAmelCase__: int = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                lowerCAmelCase__: Optional[int] = []
                # Reserve 2 slots per chunk for the re-inserted special tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        lowerCAmelCase__: Dict = np.insert(a, 0, a)
                    if sub_s[-1] != sep_id:
                        lowerCAmelCase__: Dict = np.insert(a, len(a), a)
                    assert len(a) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(a)
                new_tok_ids.extend(a)
                new_lengths.extend([len(a) for l in sub_seqs])
        lowerCAmelCase__: str = np.array(a)
        lowerCAmelCase__: Optional[Any] = np.array(a)

    def _lowerCamelCase(self: Optional[Any]):
        """Drop sequences of 11 tokens or fewer (too short to be useful)."""
        lowerCAmelCase__: Union[str, Any] = len(self)
        lowerCAmelCase__: List[Any] = self.lengths > 11
        lowerCAmelCase__: Dict = self.token_ids[indices]
        lowerCAmelCase__: Tuple = self.lengths[indices]
        lowerCAmelCase__: Any = len(self)
        logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')

    def _lowerCamelCase(self: List[str]):
        """Drop sequences where 50%+ of tokens are the unknown token
        (no-op when the tokenizer has no unk token)."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            lowerCAmelCase__: int = self.params.special_tok_ids['unk_token']
            lowerCAmelCase__: str = len(self)
            lowerCAmelCase__: List[str] = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
            lowerCAmelCase__: int = (unk_occs / self.lengths) < 0.5
            lowerCAmelCase__: List[str] = self.token_ids[indices]
            lowerCAmelCase__: Optional[Any] = self.lengths[indices]
            lowerCAmelCase__: Union[str, Any] = len(self)
            logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')

    def _lowerCamelCase(self: Union[str, Any]):
        """Log dataset statistics on the master process only."""
        if not self.params.is_master:
            return
        logger.info(f'''{len(self )} sequences''')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def _lowerCamelCase(self: int, a: Optional[int]):
        """Collate a batch of (token_ids, length) pairs: pad each sequence
        to the batch max and return (token tensor, length tensor)."""
        lowerCAmelCase__: Optional[Any] = [t[0] for t in batch]
        lowerCAmelCase__: List[str] = [t[1] for t in batch]
        assert len(a) == len(a)
        # Max for paddings
        lowerCAmelCase__: List[str] = max(a)
        # Pad token ids
        if self.params.mlm:
            lowerCAmelCase__: str = self.params.special_tok_ids['pad_token']
        else:
            lowerCAmelCase__: Optional[int] = self.params.special_tok_ids['unk_token']
        lowerCAmelCase__: Tuple = [list(t.astype(a)) + [pad_idx] * (max_seq_len_ - len(a)) for t in token_ids]
        assert len(tk_) == len(a)
        assert all(len(a) == max_seq_len_ for t in tk_)
        lowerCAmelCase__: Union[str, Any] = torch.tensor(tk_)  # (bs, max_seq_len_)
        lowerCAmelCase__: List[str] = torch.tensor(a)  # (bs)
        return tk_t, lg_t
69
0
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends

# NOTE(review): identifiers here are machine-obfuscated — assignment targets
# collapsed to `lowerCAmelCase__`, parameter names to `SCREAMING_SNAKE_CASE_` /
# `a` (repeated within one signature), argument references to `UpperCamelCase__`,
# and the base class to `__A`. Names read below (`box`, `width`, `height`,
# `size`, `do_resize`, ...) are never visibly bound. Code is byte-identical;
# comments and docstrings only.

if is_vision_available():
    import PIL  # soft dependency

if is_pytesseract_available():
    import pytesseract

lowerCamelCase__ = logging.get_logger(__name__)


def lowerCAmelCase__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> List[Any]:
    """Scale a pixel-space (left, top, right, bottom) box into the 0-1000
    coordinate system, given the image width and height."""
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]


def lowerCAmelCase__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None) -> List[str]:
    """Run Tesseract OCR on one image and return (words, normalized boxes)."""
    lowerCAmelCase__: Optional[Any] = tesseract_config if tesseract_config is not None else ''

    # apply OCR
    lowerCAmelCase__: str = to_pil_image(lowerCAmelCase__)
    lowerCAmelCase__: List[str] = pil_image.size
    lowerCAmelCase__: List[Any] = pytesseract.image_to_data(lowerCAmelCase__, lang=lowerCAmelCase__, output_type='dict', config=lowerCAmelCase__)
    lowerCAmelCase__: List[Any] = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    lowerCAmelCase__: Union[str, Any] = [idx for idx, word in enumerate(lowerCAmelCase__) if not word.strip()]
    lowerCAmelCase__: Dict = [word for idx, word in enumerate(lowerCAmelCase__) if idx not in irrelevant_indices]
    lowerCAmelCase__: Union[str, Any] = [coord for idx, coord in enumerate(lowerCAmelCase__) if idx not in irrelevant_indices]
    lowerCAmelCase__: Optional[Any] = [coord for idx, coord in enumerate(lowerCAmelCase__) if idx not in irrelevant_indices]
    lowerCAmelCase__: Union[str, Any] = [coord for idx, coord in enumerate(lowerCAmelCase__) if idx not in irrelevant_indices]
    lowerCAmelCase__: Union[str, Any] = [coord for idx, coord in enumerate(lowerCAmelCase__) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    lowerCAmelCase__: Tuple = []
    for x, y, w, h in zip(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__):
        lowerCAmelCase__: Any = [x, y, x + w, y + h]
        actual_boxes.append(lowerCAmelCase__)

    # finally, normalize the bounding boxes
    lowerCAmelCase__: int = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__))

    assert len(lowerCAmelCase__) == len(lowerCAmelCase__), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class A__(__A):
    """Image processor for document-understanding models: resizes input
    images, optionally runs Tesseract OCR to extract words + boxes, and
    flips channels RGB->BGR before batching."""

    # Model-input names produced by preprocess().
    lowercase = ["""pixel_values"""]

    def __init__(self: int, a: Any = True, a: List[str] = None, a: List[Any] = PILImageResampling.BILINEAR, a: Dict = True, a: Optional[int] = None, a: int = "", **a: List[str]):
        """Store defaults: resize flag/size/resample, OCR flag/language/config."""
        super().__init__(**UpperCamelCase__)
        lowerCAmelCase__: Dict = size if size is not None else {'height': 224, 'width': 224}
        lowerCAmelCase__: List[Any] = get_size_dict(UpperCamelCase__)
        lowerCAmelCase__: str = do_resize
        lowerCAmelCase__: Tuple = size
        lowerCAmelCase__: List[str] = resample
        lowerCAmelCase__: Any = apply_ocr
        lowerCAmelCase__: Optional[int] = ocr_lang
        lowerCAmelCase__: Union[str, Any] = tesseract_config

    def _lowerCamelCase(self: List[Any], a: Any, a: str, a: Dict = PILImageResampling.BILINEAR, a: Tuple = None, **a: Optional[Any]):
        """Resize one image to the (height, width) in the size dict."""
        lowerCAmelCase__: Optional[Any] = get_size_dict(UpperCamelCase__)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''')
        lowerCAmelCase__: Optional[int] = (size['height'], size['width'])
        return resize(UpperCamelCase__, size=UpperCamelCase__, resample=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__)

    def _lowerCamelCase(self: Tuple, a: Optional[int], a: Optional[int] = None, a: Any = None, a: List[str] = None, a: List[Any] = None, a: int = None, a: Optional[int] = None, a: Tuple = None, a: List[str] = ChannelDimension.FIRST, **a: int):
        """Full preprocessing: validate inputs, OCR (optional), resize
        (optional), RGB->BGR flip, channel reordering, then batch."""
        # Fall back to the instance defaults for every unset argument.
        lowerCAmelCase__: str = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase__: Union[str, Any] = size if size is not None else self.size
        lowerCAmelCase__: int = get_size_dict(UpperCamelCase__)
        lowerCAmelCase__: Optional[int] = resample if resample is not None else self.resample
        lowerCAmelCase__: Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
        lowerCAmelCase__: Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
        lowerCAmelCase__: Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
        lowerCAmelCase__: str = make_list_of_images(UpperCamelCase__)
        if not valid_images(UpperCamelCase__):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        # All transformations expect numpy arrays.
        lowerCAmelCase__: Any = [to_numpy_array(UpperCamelCase__) for image in images]
        if apply_ocr:
            requires_backends(self, 'pytesseract')
            lowerCAmelCase__: Union[str, Any] = []
            lowerCAmelCase__: Dict = []
            for image in images:
                lowerCAmelCase__: str = apply_tesseract(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)
                words_batch.append(UpperCamelCase__)
                boxes_batch.append(UpperCamelCase__)
        if do_resize:
            lowerCAmelCase__: List[Any] = [self.resize(image=UpperCamelCase__, size=UpperCamelCase__, resample=UpperCamelCase__) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        lowerCAmelCase__: Tuple = [flip_channel_order(UpperCamelCase__) for image in images]
        lowerCAmelCase__: Optional[int] = [to_channel_dimension_format(UpperCamelCase__, UpperCamelCase__) for image in images]
        lowerCAmelCase__: List[Any] = BatchFeature(data={'pixel_values': images}, tensor_type=UpperCamelCase__)
        if apply_ocr:
            lowerCAmelCase__: Union[str, Any] = words_batch
            lowerCAmelCase__: Union[str, Any] = boxes_batch
        return data
708
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, 
is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
69
0
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeqaSeqDataset,
    SeqaSeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    """PyTorch-Lightning module that fine-tunes a seq2seq model for summarization.

    NOTE(review): method names restored to the exact hook names PyTorch-Lightning and
    BaseTransformer dispatch to (`training_step`, `validation_step`, ...); the previous
    revision gave every method the same name, so only the last definition survived,
    and duplicate parameter names made the file a SyntaxError.
    """

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            # sortish sampler provides its own ordering; DDP must not replace it
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        # fsmt keeps separate source/target vocabs; everything else has a single one
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        # -1 means "use the whole split"
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            # mBART decodes starting from the target-language code token
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump one batch (decoded and tokenized) to JSON files."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        """Decode token ids to whitespace-stripped strings."""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        """Compute the training loss for one batch. Returns a 1-tuple ``(loss,)``."""
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        """Aggregate per-step metrics, record them in ``self.metrics`` and return logs."""
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        """Generate, decode and score one evaluation batch."""
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )

        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,  # these defaults are optimized for CNNDM. For xsum, see README.md.
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser


class TranslationModule(SummarizationModule):
    """Same training loop as SummarizationModule, scored with BLEU instead of ROUGE."""

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None) -> SummarizationModule:
    """Build the module (unless given), train it, optionally run prediction; returns the module."""
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())

    args = parser.parse_args()

    main(args)
709
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) RemBERT tokenizer, built on a SentencePiece vocabulary.

    Inputs are formatted as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Converting back to a slow tokenizer needs the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add `[CLS]`/`[SEP]` around one sequence or a pair of sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
69
0
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    The output of [`PriorTransformer`].

    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """
    A Prior Transformer model: predicts a CLIP image embedding from a noised image embedding,
    a timestep, a projected conditioning embedding and (optionally) encoder hidden states.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        # Fall back to the transformer / CLIP widths when not given explicitly.
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            # Extra learned token whose final hidden state is read out as the prediction.
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # Additive causal mask: -10000 above the diagonal, 0 elsewhere.
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10_000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: indexed by their weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use; accepts a single processor for all layers or a
        dict keyed exactly like `attn_processors`.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default implementation."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        """Run the prior over the token sequence
        [encoder_hidden_states?, proj_embedding, time_embedding, hidden_states, prd?].

        Returns a [`PriorTransformerOutput`] (or a plain tuple if `return_dict=False`).
        """
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            # Convert boolean mask to additive mask, pad for the extra tokens, then merge
            # with the causal mask and expand per attention head.
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10_000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            # The prediction is read off the dedicated "prd" token.
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Undo the CLIP-statistics normalization applied to latents during training."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class A__(AbstractDatasetInputStream):
    """Dataset input stream that builds a dataset from a python generator function."""

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        # NOTE(review): the previous revision declared every parameter as `a`
        # (duplicate argument names are a SyntaxError) and never stored the
        # builder on `self`, so the read method crashed on `self.builder`.
        # Parameter names are restored from the keyword arguments the body
        # forwards to `super().__init__` and to `Generator`.
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def _lowerCamelCase(self):
        """Build and return the dataset (streaming, or map-style 'train' split)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            # Default (None) download/verification options; only `num_proc`
            # comes from the stream configuration.
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
69
0
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Batch-norm statistics that are copied verbatim for every renamed norm layer.
_BN_STATS = ("weight", "bias", "running_mean", "running_var")


def get_detr_config(model_name):
    """Build a `DetrConfig` for the given checkpoint name.

    Returns:
        (config, is_panoptic): the populated config and whether the checkpoint
        is a panoptic-segmentation model.
    """
    # pick the pretrained ResNet backbone config matching the checkpoint name
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    config.num_labels = 250 if is_panoptic else 91
    repo_id = "huggingface/label-files"
    # NOTE(review): panoptic checkpoints presumably need the panoptic id2label
    # file; the previous revision always used the detection one — confirm.
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    """Return (old, new) pairs mapping original DETR state-dict keys to HF names."""
    rename_keys = []

    # stem
    rename_keys.append(
        ("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight")
    )
    for stat in _BN_STATS:
        rename_keys.append(
            (f"backbone.0.body.bn1.{stat}", f"backbone.conv_encoder.model.embedder.embedder.normalization.{stat}")
        )

    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            src = f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}"
            dest = f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}"
            # shortcut (only the first block of each stage has a downsample path)
            if layer_idx == 0:
                rename_keys.append((f"{src}.downsample.0.weight", f"{dest}.shortcut.convolution.weight"))
                for stat in _BN_STATS:
                    rename_keys.append((f"{src}.downsample.1.{stat}", f"{dest}.shortcut.normalization.{stat}"))
            # 3 convs per bottleneck block
            for i in range(3):
                rename_keys.append((f"{src}.conv{i + 1}.weight", f"{dest}.layer.{i}.convolution.weight"))
                for stat in _BN_STATS:
                    rename_keys.append((f"{src}.bn{i + 1}.{stat}", f"{dest}.layer.{i}.normalization.{stat}"))

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        for old, new in (
            ("self_attn.out_proj", "self_attn.out_proj"),
            ("linear1", "fc1"),
            ("linear2", "fc2"),
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "final_layer_norm"),
        ):
            for stat in ("weight", "bias"):
                rename_keys.append(
                    (f"transformer.encoder.layers.{i}.{old}.{stat}", f"encoder.layers.{i}.{new}.{stat}")
                )
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        for old, new in (
            ("self_attn.out_proj", "self_attn.out_proj"),
            ("multihead_attn.out_proj", "encoder_attn.out_proj"),
            ("linear1", "fc1"),
            ("linear2", "fc2"),
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "encoder_attn_layer_norm"),
            ("norm3", "final_layer_norm"),
        ):
            for stat in ("weight", "bias"):
                rename_keys.append(
                    (f"transformer.decoder.layers.{i}.{old}.{stat}", f"decoder.layers.{i}.{new}.{stat}")
                )

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )
    return rename_keys


def rename_key(state_dict, old, new):
    """Move `state_dict[old]` to `state_dict[new]` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused in_proj attention weights into separate q/k/v projections.

    NOTE(review): the destination key names below were lost in the previous
    revision (every write went to a throwaway local); they are reconstructed
    from the HF DETR attention module layout — confirm against the model.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def prepare_img():
    """Download the standard COCO validation test image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR repo weights into the HF structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load(
        "facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True
    ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    fmt = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=fmt)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
711
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class A__(SequenceFeatureExtractor):
    """Turns raw mono audio into padded log-mel "audio values" plus a patch mask.

    NOTE(review): the previous revision declared every `__init__`/`__call__`
    parameter as `a` (duplicate argument names are a SyntaxError) and defined
    the fbank helper under a name `__call__` never used. Parameter names are
    restored from the references inside the method bodies.
    """

    lowercase = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length: int = 2_048,
        num_channels: int = 1,
        patch_size: List[int] = [16, 16],  # read-only default, never mutated
        feature_size: int = 128,
        sampling_rate: int = 44_100,
        hop_length_to_sampling_rate: int = 86,
        n_fft: int = 2_048,
        padding_value: float = 0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # number of frequency patches per time step
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute a dB-scaled log-mel spectrogram, normalized into [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        # drop the last frame, shift and rescale the dB range to [-1, 1]
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one audio clip or a batch of clips.

        Returns a `BatchFeature` with `audio_values` (padded log-mel patches)
        and, when `return_attention_mask` is truthy, an `audio_mask` marking
        real vs. padded patches.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            # NOTE(review): the write target was lost in the previous revision;
            # copying each feature into the padded buffer is the reconstruction
            # — confirm the slice layout against the model's expected input.
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
69
0
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Return True if some permutation of `input_str` is a palindrome.

    Spaces are ignored and the comparison is case-insensitive. A string can be
    rearranged into a palindrome iff at most one character has an odd count.
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check as the Counter variant, with an explicit frequency dict."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Time both implementations on `input_str` (read as `z.check_str`)."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    # `check_str` must live at module level: the timeit snippets read it as
    # `z.check_str`.
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
712
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class A__(unittest.TestCase):
    """Checks DonutProcessor's token-sequence → JSON conversion."""

    def setUp(self):
        # NOTE(review): previously both methods were named `_lowerCamelCase`,
        # so the second definition shadowed the first, unittest never ran a
        # setup, and the processor was never stored on `self`.
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token_to_json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        # `tokenajson` is this repo's spelling of the token→JSON converter
        # (upstream transformers calls it `token2json`) — keep as-is.
        actual_json = self.processor.tokenajson(sequence)
        self.assertDictEqual(actual_json, expected_json)
69
0
def longest_common_subsequence(x: str, y: str):
    """Return (length, subsequence) for the longest common subsequence of x and y.

    Classic O(m*n) dynamic programming, followed by a traceback through the DP
    table to recover one longest subsequence.

    >>> longest_common_subsequence("AGGTAB", "GXTXAYB")
    (4, 'GTAB')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # traceback: walk from the bottom-right corner, collecting matched chars
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
713
from numpy import exp, pi, sqrt


def lowerCAmelCase__(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density at ``x``.

    Args:
        x: point (scalar or numpy array) at which to evaluate the density.
        mu: mean of the distribution.
        sigma: standard deviation; must be non-zero.

    Returns:
        1/sqrt(2*pi*sigma^2) * exp(-(x - mu)^2 / (2*sigma^2)).
    """
    # NOTE(review): the previous signature declared all three parameters as
    # `SCREAMING_SNAKE_CASE_` (duplicate argument names -> SyntaxError) while
    # the body referenced x/mu/sigma, and the return annotation said `int`.
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
69
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    """Create (or register on `subparsers`) the `accelerate test` CLI parser."""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        # dispatched by the parent CLI: `args.func(args)` runs the test command
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """Launch the accelerate end-to-end test script via `accelerate-launch`."""
    # the test script lives two directories up from this module
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
714
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class A__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for XLM's BPE tokenizer."""

    # NOTE(review): these class attributes are consumed by TokenizerTesterMixin;
    # the previous revision assigned both to the same name `lowercase`, so the
    # second overwrote the first.
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # XLM wraps a single sequence as <s> x </s> (ids 0 / 1) and a pair as
        # <s> x </s> y </s>
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
69
0
def solution(n: int = 100) -> int:
    """Project Euler problem 6: difference between the square of the sum and
    the sum of the squares of the first ``n`` natural numbers.

    Uses the closed forms (n(n+1)/2)^2 and n(n+1)(2n+1)/6, so it runs in O(1).

    >>> solution(10)
    2640
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
715
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNetaDModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    """Convert a PIL image to a normalized NCHW float tensor in [-1, 1].

    The image is first resized down to the nearest multiple of 32 in each
    dimension (the UNet requires spatial dims divisible by 32).
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with batch dim
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0  # [0, 1] -> [-1, 1]


class A__(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    NOTE(review): the original chunk inherited from the undefined name
    `__magic_name__`, gave every `__init__` parameter the same name `a`
    (a SyntaxError), and called the undefined `preprocess`.  Restored
    against the module's own imports; public keyword defaults unchanged.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Upscale `image` by iterative denoising conditioned on the input.

        Returns an `ImagePipelineOutput` (or a 1-tuple when
        `return_dict=False`) holding the decoded images.
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
69
0
"""Object-detection pipeline: image in, list of {score, label, box} dicts out."""
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)

# One prediction is a {"score": ..., "label": ..., "box": ...} dict.
Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class A__(Pipeline):
    """PyTorch-only object-detection pipeline.

    NOTE(review): the original chunk used the undefined name
    `__UpperCAmelCase` as both decorator argument and base class, bound the
    `Prediction`/`Predictions` aliases and the logger all to
    `lowerCamelCase__`, and named every method `_lowerCamelCase` (so they
    shadowed each other and `self._get_bounding_box(...)` was unresolvable).
    Restored against the module's own imports and the `Pipeline`
    preprocess/_forward/postprocess contract.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        """Route the optional `threshold` kwarg to the postprocess step."""
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        """Load the image, run the image processor (and tokenizer for
        LayoutLM-style models), and stash the original size for postprocess."""
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        """Run the model; keep `target_size` (and `bbox` if tokenized) around."""
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        """Convert raw model outputs into [{"score", "label", "box"}, ...]."""
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # Model boxes are normalized to a 0-1000 grid; rescale to pixels.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box):
        """Turn an (xmin, ymin, xmax, ymax) tensor into an int dict."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
716
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class A__(Dataset):
    """CNN/DailyMail story dataset (original name: CNNDMDataset).

    Each item is ``(document_name, story_lines, summary_lines)``.

    NOTE(review): the original chunk inherited from the undefined name
    `__magic_name__`, gave `__init__` two parameters both named `a`
    (a SyntaxError), and called `process_story` / `_add_missing_period`
    while every top-level function was named `lowerCAmelCase__`.  Names are
    restored to the ones the call sites already use.
    """

    def __init__(self, path="", prefix="train"):
        """Index every story file (skipping summaries and non-files) under `path`."""
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a raw CNN/DM story into (story_lines, summary_lines).

    Summary lines are the ones following the first `@highlight` marker;
    a story with no marker yields an empty summary.
    """
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    """Append a '.' unless the line already ends in punctuation or is a marker."""
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """Truncate `sequence` to `block_size` or right-pad it with `pad_token_id`."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Return a 1/0 attention mask (0 at padding positions) for a tensor."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Tokenize each line and flatten into (story_token_ids, summary_token_ids)."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate 0/1 segment ids per sentence, switching at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            # -1 % 2 == 1, so tokens before the first separator get segment 1.
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
69
0
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_pyanvml_available():
    import pyanvml.pyanvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: wrap a benchmark closure to run eagerly or as a
    (optionally XLA-compiled) `tf.function`.

    NOTE(review): the original chunk named the parameters
    `SCREAMING_SNAKE_CASE_` while the body read `do_eager_mode`/`use_xla`;
    restored so the function actually runs.
    """

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    """Return a (batch_size, sequence_length) int32 tensor of random token ids."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class A__(Benchmark):
    """TensorFlow benchmark runner (original name: TensorFlowBenchmark).

    NOTE(review): the original chunk inherited from the undefined name
    `__lowerCAmelCase`, bound all three class attributes to the single name
    `lowercase`, gave methods duplicate `a` parameters (a SyntaxError), and
    named every method `_lowerCamelCase` while the bodies called
    `self._prepare_inference_func` etc.  Restored to the names the call
    sites and the `Benchmark` base class use.
    """

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Time one forward pass configuration (seconds per run)."""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Time one training (forward + gradients) configuration."""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        """Measure peak memory of one forward pass configuration."""
        # trade off memory for speed: only grow GPU memory as needed
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        """Measure peak memory of one training configuration."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Build a zero-arg closure running one forward pass for `model_name`."""
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Build a zero-arg closure computing loss + gradients for `model_name`."""
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        """Return the minimum average runtime of `func` over `args.repeat` trials."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        """Return (memory, trace summary) for one execution of `func`."""
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
717
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the "docstring must mention a checkpoint" rule.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose markdown link in the class
    source points at its own hub page, or ``None`` when no such link exists.

    NOTE(review): the original chunk bound every module constant and local
    to `lowerCamelCase__`/`lowerCAmelCase__` while the code read the real
    names (`PATH_TO_TRANSFORMERS`, `_re_checkpoint`, `checkpoint`, ...);
    names restored so the script actually runs.
    """
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-deprecated config class whose
    docstring does not reference a valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
69
0
'''simple docstring''' import re def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: if len(re.findall('[ATCG]' , snake_case_ ) ) != len(snake_case_ ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
718
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
69
0
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class A__(ProcessorMixin):
    """Bundles an OwlViT image processor and a CLIP tokenizer into one
    processor (original name: OwlViTProcessor).

    NOTE(review): the original chunk inherited from the undefined name
    `lowercase_`, bound all ProcessorMixin configuration attributes to the
    single name `lowercase`, gave `__init__`/`__call__` duplicate `a`
    parameters (a SyntaxError), and named every delegating method
    `_lowerCamelCase`.  Restored to the attribute and method names the
    mixin machinery and callers use.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize `text` (string, list, or nested list of query strings),
        and/or process `query_images`/`images`; return a BatchEncoding."""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Delegate to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Delegate to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Delegate to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
719
"""Lazy import structure for the Chinese-CLIP model subpackage.

Submodules are only imported on first attribute access via ``_LazyModule``,
keeping the top-level ``import transformers`` cheap.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Maps submodule name -> list of public names it exports.
# NOTE(review): identifiers in this file appear machine-mangled — this dict
# should presumably be named `_import_structure` (referenced at the bottom),
# but it is bound to `lowerCamelCase__`; as written the final `_LazyModule(...)`
# call would raise NameError. Confirm against the original module.
lowerCamelCase__ = {
    """configuration_chinese_clip""": [
        """CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """ChineseCLIPConfig""",
        """ChineseCLIPOnnxConfig""",
        """ChineseCLIPTextConfig""",
        """ChineseCLIPVisionConfig""",
    ],
    """processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}

# Vision-only exports (image processor and its deprecated alias).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = ["""ChineseCLIPFeatureExtractor"""]
    lowerCamelCase__ = ["""ChineseCLIPImageProcessor"""]

# Torch-only modeling exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ChineseCLIPModel""",
        """ChineseCLIPPreTrainedModel""",
        """ChineseCLIPTextModel""",
        """ChineseCLIPVisionModel""",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy in sys.modules.
    lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
69
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { """configuration_informer""": [ """INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """InformerForPrediction""", """InformerModel""", """InformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
720
"""Build a RAG knowledge dataset: split documents into passages, embed them
with a DPR context encoder, and index the embeddings with a Faiss HNSW index.

NOTE(review): this file is machine-mangled. Every function/dataclass-field name
was rewritten to a generic placeholder (`lowerCAmelCase__`, `lowercase`,
`A__`), parameters were all renamed to `SCREAMING_SNAKE_CASE_` (duplicate
parameter names are a SyntaxError in Python), and later uses still reference
the original names (`text`, `documents`, `dataset`, `main`, `parser`, ...),
which would raise NameError. The original identifiers must be restored before
this script can run; the comments below document the intended behavior.
"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser

lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)  # inference only — no autograd graphs needed
lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""


# Split a text into chunks of `n` words joined by `character`.
# NOTE(review): duplicate parameter names below are a SyntaxError; originally
# the signature was presumably (text, n=100, character=" ").
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=" " ) -> List[str]:
    lowerCAmelCase__ : Optional[Any] = text.split(SCREAMING_SNAKE_CASE_ )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )]


# Map a batch of {"title", "text"} documents to per-passage rows.
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict:
    # NOTE(review): annotating a tuple-unpacking assignment is a SyntaxError;
    # originally: titles, texts = [], []
    lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
    for title, text in zip(documents['title'] , documents['text'] ):
        if text is not None:
            for passage in split_text(SCREAMING_SNAKE_CASE_ ):
                titles.append(title if title is not None else '' )
                texts.append(SCREAMING_SNAKE_CASE_ )
    return {"title": titles, "text": texts}


# Compute DPR pooled embeddings for a batch of passages.
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict:
    lowerCAmelCase__ : List[str] = ctx_tokenizer(
        documents['title'] , documents['text'] , truncation=SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt'
    )['input_ids']
    lowerCAmelCase__ : Tuple = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


# Main pipeline: load CSV -> split -> embed -> save -> build Faiss HNSW index.
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
    ######################################
    logger.info('Step 1 - Create the dataset' )
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    lowerCAmelCase__ : str = load_dataset(
        'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text']
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    lowerCAmelCase__ : Optional[Any] = dataset.map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=processing_args.num_proc )

    # And compute the embeddings
    lowerCAmelCase__ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase__ : str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    lowerCAmelCase__ : List[Any] = Features(
        {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )}
    )  # optional, save as float32 instead of float64 to save space
    lowerCAmelCase__ : List[Any] = dataset.map(
        partial(SCREAMING_SNAKE_CASE_ , ctx_encoder=SCREAMING_SNAKE_CASE_ , ctx_tokenizer=SCREAMING_SNAKE_CASE_ ) ,
        batched=SCREAMING_SNAKE_CASE_ ,
        batch_size=processing_args.batch_size ,
        features=SCREAMING_SNAKE_CASE_ ,
    )

    # And finally save your dataset
    lowerCAmelCase__ : Optional[Any] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
    dataset.save_to_disk(SCREAMING_SNAKE_CASE_ )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info('Step 2 - Index the dataset' )
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    lowerCAmelCase__ : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('embeddings' , custom_index=SCREAMING_SNAKE_CASE_ )

    # And save the index
    lowerCAmelCase__ : str = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
    dataset.get_index('embeddings' ).save(SCREAMING_SNAKE_CASE_ )
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


# CLI arguments for the example (paths and model names).
# NOTE(review): every field below is bound to the same mangled name
# `lowercase`, so only the last survives; `__magic_name__` is undefined.
@dataclass
class A__ :
    lowercase = field(
        default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) ,
        metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} ,
    )
    lowercase = field(
        default=__magic_name__ ,
        metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} ,
    )
    lowercase = field(
        default='facebook/rag-sequence-nq' ,
        metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} ,
    )
    lowercase = field(
        default='facebook/dpr-ctx_encoder-multiset-base' ,
        metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        } ,
    )
    lowercase = field(
        default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' ) ,
        metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} ,
    )


# Processing arguments (parallelism and embedding batch size).
@dataclass
class A__ :
    lowercase = field(
        default=__magic_name__ ,
        metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        } ,
    )
    lowercase = field(
        default=16 ,
        metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        } ,
    )


# Faiss HNSW index hyper-parameters.
@dataclass
class A__ :
    lowercase = field(
        default=768 ,
        metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} ,
    )
    lowercase = field(
        default=128 ,
        metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        } ,
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    # NOTE(review): the dataclass names and `parser`/`main` below were mangled
    # away; as written these references are undefined.
    lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Fall back to a temp dir when no output dir was given.
        lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
69
0
"""Lazy import structure for the RoBERTa model subpackage (PyTorch,
TensorFlow, and Flax implementations plus tokenizers)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> list of public names it exports.
# NOTE(review): this dict is presumably `_import_structure` (referenced by the
# `_LazyModule(...)` call at the bottom) but the binding was machine-mangled to
# `lowerCamelCase__`; as written the bottom line would raise NameError.
lowerCamelCase__ = {
    """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
    """tokenization_roberta""": ["""RobertaTokenizer"""],
}

# Fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = ["""RobertaTokenizerFast"""]

# PyTorch-only modeling exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RobertaForCausalLM""",
        """RobertaForMaskedLM""",
        """RobertaForMultipleChoice""",
        """RobertaForQuestionAnswering""",
        """RobertaForSequenceClassification""",
        """RobertaForTokenClassification""",
        """RobertaModel""",
        """RobertaPreTrainedModel""",
    ]

# TensorFlow-only modeling exports.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRobertaForCausalLM""",
        """TFRobertaForMaskedLM""",
        """TFRobertaForMultipleChoice""",
        """TFRobertaForQuestionAnswering""",
        """TFRobertaForSequenceClassification""",
        """TFRobertaForTokenClassification""",
        """TFRobertaMainLayer""",
        """TFRobertaModel""",
        """TFRobertaPreTrainedModel""",
    ]

# Flax-only modeling exports.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """FlaxRobertaForCausalLM""",
        """FlaxRobertaForMaskedLM""",
        """FlaxRobertaForMultipleChoice""",
        """FlaxRobertaForQuestionAnswering""",
        """FlaxRobertaForSequenceClassification""",
        """FlaxRobertaForTokenClassification""",
        """FlaxRobertaModel""",
        """FlaxRobertaPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy in sys.modules.
    lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
721
"""Tests for `DDPMParallelScheduler` (configuration sweeps, variance values,
full denoising loops, and custom-timestep validation).

NOTE(review): this file is machine-mangled — the base class was renamed to the
undefined `__magic_name__` (presumably SchedulerCommonTest), every method is
named `_lowerCamelCase` (later defs overwrite earlier ones on the class), and
every local binding was renamed to `lowerCAmelCase__` while later statements
still use the original names (`config`, `scheduler`, `model`, ...), which
would raise NameError. `Optional`, `Union`, etc. are also never imported.
The original identifiers must be restored before these tests can run.
"""
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class A__ ( __magic_name__ ):
    # Scheduler class(es) exercised by the common-test machinery.
    lowercase = (DDPMParallelScheduler,)

    def _lowerCamelCase ( self : str , **a : Optional[int] ):
        '''Return a default scheduler config, updated with any overrides in **a.'''
        lowerCAmelCase__ : str = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**a )
        return config

    def _lowerCamelCase ( self : Tuple ):
        '''Sweep num_train_timesteps over several values.'''
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=a )

    def _lowerCamelCase ( self : int ):
        '''Sweep paired (beta_start, beta_end) values.'''
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=a , beta_end=a )

    def _lowerCamelCase ( self : Optional[Any] ):
        '''Sweep supported beta schedules.'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=a )

    def _lowerCamelCase ( self : List[str] ):
        '''Sweep supported variance types.'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=a )

    def _lowerCamelCase ( self : List[Any] ):
        '''Check both clip_sample settings.'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=a )

    def _lowerCamelCase ( self : Dict ):
        '''Check thresholding combined with every prediction type.'''
        self.check_over_configs(thresholding=a )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=a ,
                    prediction_type=a ,
                    sample_max_value=a ,
                )

    def _lowerCamelCase ( self : List[Any] ):
        '''Sweep supported prediction types.'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=a )

    def _lowerCamelCase ( self : Any ):
        '''Check the forward pass at boundary timesteps.'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=a )

    def _lowerCamelCase ( self : int ):
        '''Pin known variance values at timesteps 0, 487 and 999.'''
        lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : Any = self.get_scheduler_config()
        lowerCAmelCase__ : List[str] = scheduler_class(**a )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5

    def _lowerCamelCase ( self : Optional[int] ):
        '''Run batch_step_no_noise on three stacked samples and pin the stats.'''
        lowerCAmelCase__ : Any = self.scheduler_classes[0]
        lowerCAmelCase__ : Any = self.get_scheduler_config()
        lowerCAmelCase__ : int = scheduler_class(**a )
        lowerCAmelCase__ : str = len(a )

        lowerCAmelCase__ : Tuple = self.dummy_model()
        lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter
        lowerCAmelCase__ : int = self.dummy_sample_deter + 0.1
        lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter - 0.1

        lowerCAmelCase__ : Tuple = samplea.shape[0]
        lowerCAmelCase__ : List[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
        lowerCAmelCase__ : Optional[Any] = torch.arange(a )[0:3, None].repeat(1 , a )

        lowerCAmelCase__ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        lowerCAmelCase__ : Tuple = scheduler.batch_step_no_noise(a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )

        lowerCAmelCase__ : str = torch.sum(torch.abs(a ) )
        lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )

        assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
        assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3

    def _lowerCamelCase ( self : Any ):
        '''Full reverse-diffusion loop with epsilon prediction; pin the stats.'''
        lowerCAmelCase__ : str = self.scheduler_classes[0]
        lowerCAmelCase__ : List[Any] = self.get_scheduler_config()
        lowerCAmelCase__ : Dict = scheduler_class(**a )
        lowerCAmelCase__ : str = len(a )

        lowerCAmelCase__ : Any = self.dummy_model()
        lowerCAmelCase__ : int = self.dummy_sample_deter
        lowerCAmelCase__ : Tuple = torch.manual_seed(0 )

        for t in reversed(range(a ) ):
            # 1. predict noise residual
            lowerCAmelCase__ : Optional[Any] = model(a , a )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase__ : int = scheduler.step(a , a , a , generator=a ).prev_sample

            lowerCAmelCase__ : List[str] = pred_prev_sample

        lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(a ) )

        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3

    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Full reverse-diffusion loop with v_prediction; pin the stats.'''
        lowerCAmelCase__ : str = self.scheduler_classes[0]
        lowerCAmelCase__ : Dict = self.get_scheduler_config(prediction_type='v_prediction' )
        lowerCAmelCase__ : int = scheduler_class(**a )
        lowerCAmelCase__ : str = len(a )

        lowerCAmelCase__ : Optional[int] = self.dummy_model()
        lowerCAmelCase__ : List[str] = self.dummy_sample_deter
        lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )

        for t in reversed(range(a ) ):
            # 1. predict noise residual
            lowerCAmelCase__ : List[Any] = model(a , a )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase__ : Optional[int] = scheduler.step(a , a , a , generator=a ).prev_sample

            lowerCAmelCase__ : str = pred_prev_sample

        lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
        lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )

        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3

    def _lowerCamelCase ( self : Dict ):
        '''Custom descending timesteps: previous_timestep walks the list, ending at -1.'''
        lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : Any = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[int] = scheduler_class(**a )

        lowerCAmelCase__ : Optional[Any] = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=a )

        lowerCAmelCase__ : List[Any] = scheduler.timesteps

        for i, timestep in enumerate(a ):
            if i == len(a ) - 1:
                lowerCAmelCase__ : Tuple = -1
            else:
                lowerCAmelCase__ : Dict = timesteps[i + 1]

            lowerCAmelCase__ : str = scheduler.previous_timestep(a )
            lowerCAmelCase__ : int = prev_t.item()

            self.assertEqual(a , a )

    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Non-descending custom timesteps must be rejected.'''
        lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[Any] = scheduler_class(**a )

        lowerCAmelCase__ : str = [100, 87, 50, 51, 0]

        with self.assertRaises(a , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=a )

    def _lowerCamelCase ( self : List[Any] ):
        '''Passing both num_inference_steps and custom timesteps must be rejected.'''
        lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : str = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[int] = scheduler_class(**a )

        lowerCAmelCase__ : str = [100, 87, 50, 1, 0]
        lowerCAmelCase__ : int = len(a )

        with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=a , timesteps=a )

    def _lowerCamelCase ( self : List[str] ):
        '''Timesteps >= num_train_timesteps must be rejected.'''
        lowerCAmelCase__ : Dict = self.scheduler_classes[0]
        lowerCAmelCase__ : Dict = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[int] = scheduler_class(**a )

        lowerCAmelCase__ : str = [scheduler.config.num_train_timesteps]

        # NOTE(review): the msg string below is not an f-string and has an
        # extra closing brace, so the placeholder is never interpolated —
        # looks like an upstream bug; confirm before "fixing".
        with self.assertRaises(
            a ,
            msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' ,
        ):
            scheduler.set_timesteps(timesteps=a )
69
0
"""Tests for `GPTSanJapaneseTokenizer`: vocabulary round-trips, the
<|bagoftoken|> expansion, prefix-text (prefix-LM) encoding, token_type_ids,
the SEG separator token, and batch padding.

NOTE(review): this file is machine-mangled — the mixin base was renamed to the
undefined `__magic_name__` (presumably TokenizerTesterMixin), the three class
attributes are all bound to the same name `lowercase` (later ones overwrite
earlier ones), every method is named `_lowerCamelCase`, and local bindings were
renamed to `lowerCAmelCase__` while later statements still reference the
original names (`vocab_tokens`, `tokenizer`, `x_token`, ...), which would
raise NameError. Restore the original identifiers before running.
"""
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
    lowercase = GPTSanJapaneseTokenizer
    lowercase = False
    lowercase = {'do_clean_text': False, 'add_prefix_space': False}

    def _lowerCamelCase ( self : Tuple ):
        '''Write a tiny vocab file and emoji map into a temp dir for the tests.'''
        super().setUp()

        # fmt: off
        lowerCAmelCase__ : Union[str, Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        lowerCAmelCase__ : List[Any] = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        lowerCAmelCase__ : Optional[Any] = {'unk_token': '<unk>'}

        lowerCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        lowerCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.emoji_file , 'w' ) as emoji_writer:
            emoji_writer.write(json.dumps(a ) )

    def _lowerCamelCase ( self : int , **a : Tuple ):
        '''Build a tokenizer from the temp vocab, merging the special-token map.'''
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **a )

    def _lowerCamelCase ( self : str , a : Any ):
        '''Return an (input, expected-output) text pair; 㔺界 normalizes to 世界.'''
        lowerCAmelCase__ : int = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        lowerCAmelCase__ : List[Any] = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def _lowerCamelCase ( self : List[str] , a : Tuple ):
        '''Encode the sample text and decode it back (clean round-trip pair).'''
        lowerCAmelCase__ : Any = self.get_input_output_texts(a )
        lowerCAmelCase__ : List[Any] = tokenizer.encode(a , add_special_tokens=a )
        lowerCAmelCase__ : Any = tokenizer.decode(a , clean_up_tokenization_spaces=a )
        return text, ids

    def _lowerCamelCase ( self : List[str] ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def _lowerCamelCase ( self : Dict ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def _lowerCamelCase ( self : Any ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def _lowerCamelCase ( self : int ):
        '''Tokenize sample text and convert to ids, with and without <unk>.'''
        lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer()

        # Testing tokenization
        lowerCAmelCase__ : Any = 'こんにちは、世界。 こんばんは、㔺界。'
        lowerCAmelCase__ : str = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        lowerCAmelCase__ : int = tokenizer.tokenize(a )
        self.assertListEqual(a , a )

        # Testing conversion to ids without special tokens
        lowerCAmelCase__ : Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        lowerCAmelCase__ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(a , a )

        # Testing conversion to ids with special tokens
        lowerCAmelCase__ : Optional[Any] = tokens + [tokenizer.unk_token]
        lowerCAmelCase__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        lowerCAmelCase__ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(a , a )

    def _lowerCamelCase ( self : Optional[int] ):
        '''<|bagoftoken|> must expand to repeated tokens when decoded.'''
        lowerCAmelCase__ : Any = self.get_tokenizer()

        # Testing tokenization
        lowerCAmelCase__ : int = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        lowerCAmelCase__ : List[str] = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        lowerCAmelCase__ : Optional[Any] = tokenizer.encode(a )
        lowerCAmelCase__ : List[Any] = tokenizer.decode(a )
        self.assertEqual(a , a )

    @slow
    def _lowerCamelCase ( self : Optional[Any] ):
        '''Prefix text passed separately must decode identically to concatenation.'''
        lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        # Testing tokenization
        lowerCAmelCase__ : Union[str, Any] = 'こんにちは、世界。'
        lowerCAmelCase__ : Optional[int] = 'こんばんは、㔺界。😀'
        lowerCAmelCase__ : Dict = 'こんにちは、世界。こんばんは、世界。😀'
        lowerCAmelCase__ : str = tokenizer.encode(prefix_text + input_text )
        lowerCAmelCase__ : Any = tokenizer.encode('' , prefix_text=prefix_text + input_text )
        lowerCAmelCase__ : Dict = tokenizer.encode(a , prefix_text=a )
        lowerCAmelCase__ : List[str] = tokenizer.decode(a )
        lowerCAmelCase__ : Any = tokenizer.decode(a )
        lowerCAmelCase__ : Union[str, Any] = tokenizer.decode(a )
        self.assertEqual(a , a )
        self.assertEqual(a , a )
        self.assertEqual(a , a )

    @slow
    def _lowerCamelCase ( self : Optional[Any] ):
        '''token_type_ids must mark the prefix span for each encoding variant.'''
        lowerCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        # Testing tokenization
        lowerCAmelCase__ : Union[str, Any] = 'こんにちは、世界。'
        lowerCAmelCase__ : str = 'こんばんは、㔺界。😀'
        lowerCAmelCase__ : Optional[Any] = len(tokenizer.encode(a ) ) - 2
        lowerCAmelCase__ : Optional[int] = len(tokenizer.encode(a ) ) - 2

        # Expected type-id patterns for the three ways of supplying the prefix.
        lowerCAmelCase__ : int = [1] + [0] * (len_prefix + len_text + 1)
        lowerCAmelCase__ : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
        lowerCAmelCase__ : Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        lowerCAmelCase__ : Optional[int] = tokenizer(prefix_text + input_text ).token_type_ids
        lowerCAmelCase__ : List[Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
        lowerCAmelCase__ : List[str] = tokenizer(a , prefix_text=a ).token_type_ids
        self.assertListEqual(a , a )
        self.assertListEqual(a , a )
        self.assertListEqual(a , a )

    @slow
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''The SEG token position must reflect how the prefix was supplied.'''
        lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        lowerCAmelCase__ : str = tokenizer.encode('あンいワ' )
        lowerCAmelCase__ : str = tokenizer.encode('' , prefix_text='あンいワ' )
        lowerCAmelCase__ : List[Any] = tokenizer.encode('いワ' , prefix_text='あン' )

        self.assertEqual(tokenizer.decode(a ) , tokenizer.decode(a ) )
        self.assertEqual(tokenizer.decode(a ) , tokenizer.decode(a ) )
        self.assertNotEqual(a , a )
        self.assertNotEqual(a , a )
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] )  # SEG token

    @slow
    def _lowerCamelCase ( self : Any ):
        '''Batch encoding must pad ids / type ids / attention masks consistently.'''
        lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        lowerCAmelCase__ : Optional[Any] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        lowerCAmelCase__ : Any = tokenizer(a , padding=a )
        lowerCAmelCase__ : int = tokenizer.batch_encode_plus(a , padding=a )

        # fmt: off
        lowerCAmelCase__ : int = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        lowerCAmelCase__ : Union[str, Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        lowerCAmelCase__ : str = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , a )
        self.assertListEqual(x_token.token_type_ids , a )
        self.assertListEqual(x_token.attention_mask , a )
        self.assertListEqual(x_token_a.input_ids , a )
        self.assertListEqual(x_token_a.token_type_ids , a )
        self.assertListEqual(x_token_a.attention_mask , a )

    def _lowerCamelCase ( self : Any ):
        '''Intentionally skipped common test (not applicable to this tokenizer).'''
        pass

    def _lowerCamelCase ( self : Optional[int] ):
        '''Intentionally skipped common test (not applicable to this tokenizer).'''
        pass
700
"""Processor combining a LayoutLMv3 image processor (optionally running OCR)
with a LayoutLMv3 tokenizer into a single callable.

NOTE(review): this file is machine-mangled — the base class was renamed to the
undefined `__magic_name__` (presumably ProcessorMixin), the three class
attributes are all bound to the same name `lowercase` (later ones overwrite
earlier ones), and every parameter was renamed to `a`, producing duplicate
parameter names (a SyntaxError) in `__init__` and `__call__`. Local bindings
were renamed to `lowerCAmelCase__` while later statements still use the
original names (`feature_extractor`, `encoded_inputs`, ...). The original
identifiers must be restored before this module can run.
"""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class A__ ( __magic_name__ ):
    lowercase = ['image_processor', 'tokenizer']
    lowercase = 'LayoutLMv3ImageProcessor'
    lowercase = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__( self : Optional[int] , a : Union[str, Any]=None , a : Optional[Any]=None , **a : str ):
        '''Accept an image processor and tokenizer; honor the deprecated
        `feature_extractor` kwarg as a fallback for the image processor.'''
        lowerCAmelCase__ : List[str] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , a , )
            lowerCAmelCase__ : int = kwargs.pop('feature_extractor' )

        lowerCAmelCase__ : Dict = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        super().__init__(a , a )

    def __call__( self : List[Any] , a : List[Any] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : str , ):
        '''Run the image processor (optionally OCR) and then the tokenizer,
        returning a BatchEncoding that also carries pixel_values.'''
        # When the image processor runs OCR itself, callers must not supply
        # their own boxes or word labels.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )

        # first, apply the image processor
        lowerCAmelCase__ : List[str] = self.image_processor(images=a , return_tensors=a )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(a , a ):
                lowerCAmelCase__ : Optional[Any] = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowerCAmelCase__ : List[str] = features['words']

        lowerCAmelCase__ : List[Any] = self.tokenizer(
            text=text if text is not None else features['words'] ,
            text_pair=text_pair if text_pair is not None else None ,
            boxes=boxes if boxes is not None else features['boxes'] ,
            word_labels=a ,
            add_special_tokens=a ,
            padding=a ,
            truncation=a ,
            max_length=a ,
            stride=a ,
            pad_to_multiple_of=a ,
            return_token_type_ids=a ,
            return_attention_mask=a ,
            return_overflowing_tokens=a ,
            return_special_tokens_mask=a ,
            return_offsets_mapping=a ,
            return_length=a ,
            verbose=a ,
            return_tensors=a ,
            **a ,
        )

        # add pixel values
        lowerCAmelCase__ : Tuple = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            # Duplicate images so each overflowed sequence keeps its source image.
            lowerCAmelCase__ : List[str] = self.get_overflowing_images(a , encoded_inputs['overflow_to_sample_mapping'] )
        lowerCAmelCase__ : List[str] = images

        return encoded_inputs

    def _lowerCamelCase ( self : Any , a : List[str] , a : int ):
        '''Repeat images per `overflow_to_sample_mapping` so lengths match.'''
        lowerCAmelCase__ : int = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )

        if len(a ) != len(a ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f''' {len(a )} and {len(a )}''' )

        return images_with_overflow

    def _lowerCamelCase ( self : Union[str, Any] , *a : Optional[Any] , **a : List[str] ):
        '''Forward to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*a , **a )

    def _lowerCamelCase ( self : Tuple , *a : List[str] , **a : Optional[Any] ):
        '''Forward to the tokenizer's decode.'''
        return self.tokenizer.decode(*a , **a )

    @property
    def _lowerCamelCase ( self : int ):
        '''Model input names produced by this processor.'''
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def _lowerCamelCase ( self : List[Any] ):
        '''Deprecated alias for `image_processor_class`.'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , )
        return self.image_processor_class

    @property
    def _lowerCamelCase ( self : Dict ):
        '''Deprecated alias for `image_processor`.'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , )
        return self.image_processor
69
0
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT checkpoint and rewrite its state dict to the HF OPT layout.

    Args:
        checkpoint_path: path to the original checkpoint file (torch.save format).

    Returns:
        dict: state dict with renamed keys and fused QKV projections split into
        separate q/k/v projections.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    # Some checkpoints nest the weights under a "model" key; unwrap once
    # (the original loaded the file twice here — once is enough).
    if "model" in sd.keys():
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint into a Hugging Face `OPTModel` folder.

    Args:
        checkpoint_path: path to the original checkpoint.
        pytorch_dump_folder_path: output folder for the converted model.
        config: optional path / identifier of an `OPTConfig`; defaults to `OPTConfig()`.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
701
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester:
    """Builds small random SqueezeBert configs/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): the original maps hidden_dropout_prob -> attention_probs_dropout_prob
        # and attention_probs_dropout_prob -> attention_dropout; kept as-is.
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # replicate inputs along a new "choice" axis
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
69
0
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


# Credentials / endpoints for the hub-ci staging environment used by the test suite.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# NOTE(review): this template had "(unknown)" where the `{path}` placeholder belongs —
# restored to match the datasets template above so hub file downloads resolve correctly.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{path}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub downloads at the hub-ci endpoint."""
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    """Point the `datasets` library at the hub-ci endpoint."""
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """Redirect the token file so tests never clobber the developer's real token."""
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    """Install the CI token for the duration of a test, then remove it."""
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    """Yield the CI token, restoring any previously saved token afterwards."""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
702
lowerCamelCase__ = """Alexander Joslin""" import operator as op from .stack import Stack def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int: lowerCAmelCase__ : Union[str, Any] = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub} lowerCAmelCase__ : Stack[int] = Stack() lowerCAmelCase__ : Stack[str] = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(SCREAMING_SNAKE_CASE_ ) ) elif i in operators: # RULE 2 operator_stack.push(SCREAMING_SNAKE_CASE_ ) elif i == ")": # RULE 4 lowerCAmelCase__ : List[Any] = operator_stack.peek() operator_stack.pop() lowerCAmelCase__ : List[str] = operand_stack.peek() operand_stack.pop() lowerCAmelCase__ : List[Any] = operand_stack.peek() operand_stack.pop() lowerCAmelCase__ : Tuple = operators[opr](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) operand_stack.push(SCREAMING_SNAKE_CASE_ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
69
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase__ = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
703
import numpy


class TwoHiddenLayerNeuralNetwork:
    """A tiny fully connected network with two hidden layers (4 and 3 nodes)
    trained by plain gradient descent on a squared-error loss."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """
        Args:
            input_array: training inputs, shape (num_samples, num_features).
            output_array: target outputs, shape (num_samples, 1).
        """
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # input layer -> first hidden layer (4 nodes)
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # first hidden layer (4 nodes) -> second hidden layer (3 nodes)
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # second hidden layer (3 nodes) -> output layer (1 node)
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network; starts as zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the training inputs forward and return the output layer activations."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Compute the loss gradient for each weight matrix and take one ascent
        step of `2 * error * sigmoid'` back through the layers."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += updated_input_layer_and_first_hidden_layer_weights
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += updated_second_hidden_layer_and_output_layer_weights

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run `iterations` forward/backward passes; optionally print MSE each pass.

        Args:
            output: target values used for the printed loss.
            iterations: number of training passes.
            give_loss: when True, print the mean squared error every iteration.
        """
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Classify a single input vector; returns 1 if the output activation
        exceeds 0.6, else 0."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        # .item() avoids NumPy's deprecated size-1-array-to-int conversion.
        return int((self.layer_between_second_hidden_layer_and_output > 0.6).item())


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Elementwise logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of its output: s * (1 - s)."""
    return (value) * (1 - (value))


def example() -> int:
    """Train on the 3-input majority-style truth table and classify [1, 1, 1]."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.floataa if hasattr(numpy, "floataa") else numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
69
0
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stop once the total sequence length reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        # Used only to warn when generation will exceed the model's position limit.
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop once `max_new_tokens` have been generated past `start_length`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stop once generation has run for more than `max_time` seconds."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of criteria; generation stops as soon as any one of them fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        """The max_length of the first length-based criterion in the list, if any."""
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Return a copy of `stopping_criteria` guaranteed to enforce `max_length`.

    Warns if the list already carries a different max_length; otherwise appends a
    `MaxLengthCriteria(max_length)` to the copy.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
704
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ): '''simple docstring''' lowerCAmelCase__ : str = parent lowerCAmelCase__ : Union[str, Any] = batch_size lowerCAmelCase__ : List[str] = image_size lowerCAmelCase__ : Optional[Any] = patch_size lowerCAmelCase__ : Tuple = num_channels lowerCAmelCase__ : Optional[int] = embed_dim lowerCAmelCase__ : Tuple = depths lowerCAmelCase__ : List[str] = num_heads lowerCAmelCase__ : List[Any] = window_size lowerCAmelCase__ : Any = mlp_ratio lowerCAmelCase__ : Optional[Any] = qkv_bias lowerCAmelCase__ : Any = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : int = drop_path_rate lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : int = 
use_absolute_embeddings lowerCAmelCase__ : List[str] = patch_norm lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = initializer_range lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : List[Any] = scope lowerCAmelCase__ : Dict = use_labels lowerCAmelCase__ : List[Any] = type_sequence_label_size lowerCAmelCase__ : Optional[Any] = encoder_stride def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : Optional[Any] = None if self.use_labels: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : int = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self : List[str] ): '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = model(a ) lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ): '''simple docstring''' lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : str = model(a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ : List[str] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ): '''simple docstring''' lowerCAmelCase__ : str = self.type_sequence_label_size lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a ) model.to(a ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs lowerCAmelCase__ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowercase = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] 
= SwinvaModelTester(self ) lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' ) def _lowerCamelCase ( self : Any ): '''simple docstring''' pass @unittest.skip(reason='Swinv2 does not use inputs_embeds' ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Optional[int] = model_class(a ) lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Tuple = [*signature.parameters.keys()] lowerCAmelCase__ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _lowerCamelCase 
( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : str = False lowerCAmelCase__ : List[Any] = True lowerCAmelCase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Dict = outputs.attentions lowerCAmelCase__ : Dict = len(self.model_tester.depths ) self.assertEqual(len(a ) , a ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : Optional[int] = config.window_size**2 lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) lowerCAmelCase__ : Tuple = len(a ) # Check attention is always last and order is fine lowerCAmelCase__ : str = True lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) if hasattr(self.model_tester , 'num_hidden_states_types' ): lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCAmelCase__ : Any = 2 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowerCAmelCase__ : Dict = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, 
window_size_squared] , ) def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ): '''simple docstring''' lowerCAmelCase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.hidden_states lowerCAmelCase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swinv2 has a different seq_length lowerCAmelCase__ : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(a ) , a ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape lowerCAmelCase__ : List[str] = ( reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : List[str] = True 
self.check_hidden_states_output(a , a , a , a ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Any = 3 lowerCAmelCase__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase__ : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase__ : str = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a ) self.assertIsNotNone(a ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = _config_zero_init(a ) for model_class in 
self.all_model_classes: lowerCAmelCase__ : int = model_class(config=a ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class A__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to( a ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**a ) # verify the logits lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
69
0
def lowerCAmelCase__(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string.

    The result carries a "0b" prefix and is zero-padded to the width of the
    longer operand, so e.g. ``lowerCAmelCase__(5, 5)`` is ``'0b000'``.

    Args:
        a: first non-negative operand.
        b: second non-negative operand.

    Returns:
        The per-bit XOR of ``a`` and ``b`` as a "0b"-prefixed string.

    Raises:
        ValueError: if either input is negative.

    >>> lowerCAmelCase__(25, 32)
    '0b111001'
    """
    # BUG FIX: the original signature declared the same parameter name twice
    # (a SyntaxError) while the body referenced undefined names `a` and `b`.
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    # bin() already returns a str; the original wrapped it in a redundant str().
    a_binary = bin(a)[2:]  # strip the leading "0b"
    b_binary = bin(b)[2:]  # strip the leading "0b"

    max_len = max(len(a_binary), len(b_binary))
    # char_a != char_b is exactly the XOR of two '0'/'1' characters.
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
705
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 sub-string divisibility property.

    ``num`` is a tuple of the digits 0-9 in some order; the property holds
    when each 3-digit substring d2d3d4 ... d8d9d10 is divisible by
    2, 3, 5, 7, 11, 13, 17 respectively.

    Args:
        num: a digit tuple, e.g. ``(1, 4, 0, 6, 3, 5, 7, 2, 8, 9)``.

    Returns:
        True if every substring divisibility test passes.
    """
    # BUG FIX: the original def was renamed to a name that never matched the
    # `is_substring_divisible(...)` call site, and its body referenced an
    # undefined `num`; both are restored here.
    if num[3] % 2 != 0:  # d2d3d4 divisible by 2  <=> d4 even
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:  # d3d4d5 divisible by 3
        return False
    if num[5] % 5 != 0:  # d4d5d6 divisible by 5  <=> d6 in {0, 5}
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        # 3-digit window starting at index i + 4, checked against 7/11/13/17.
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers with the substring
    divisibility property (Project Euler problem 43).

    Args:
        n: number of digits to permute; the problem uses the default 10.
    """
    # BUG FIX: the original garbled `map(str, num)` into map over an
    # undefined obfuscated name.
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
69
0
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class A__ ( __magic_name__ , unittest.TestCase ): lowercase = BlenderbotSmallTokenizer lowercase = False def _lowerCamelCase ( self : Any ): '''simple docstring''' super().setUp() lowerCAmelCase__ : Any = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] lowerCAmelCase__ : str = dict(zip(a , range(len(a ) ) ) ) lowerCAmelCase__ : Union[str, Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] lowerCAmelCase__ : str = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(a ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(a ) ) def _lowerCamelCase ( self : List[str] , **a : Union[str, Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **a ) def _lowerCamelCase ( self : Tuple , a : Tuple ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = 'adapt act apte' lowerCAmelCase__ : List[Any] = 'adapt act apte' return input_text, output_text def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Any = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase__ : Dict = 'adapt act apte' lowerCAmelCase__ : str = ['adapt', 'act', 'ap@@', 'te'] lowerCAmelCase__ : Dict = tokenizer.tokenize(a ) self.assertListEqual(a , a ) lowerCAmelCase__ : int = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] 
lowerCAmelCase__ : Optional[int] = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1_384] lowerCAmelCase__ : Any = 'I am a small frog.' lowerCAmelCase__ : List[str] = tok([src_text] , padding=a , truncation=a )['input_ids'] lowerCAmelCase__ : Union[str, Any] = tok.batch_decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : int = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) lowerCAmelCase__ : Optional[int] = 'I am a small frog .' lowerCAmelCase__ : Any = '.' lowerCAmelCase__ : Optional[int] = tok(a )['input_ids'] lowerCAmelCase__ : Any = tok(a )['input_ids'] assert encoded[-1] == encoded_dot[0]
706
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( __magic_name__ , unittest.TestCase ): lowercase = ConsistencyModelPipeline lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt lowercase = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet' , ) return unet @property def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , ) return unet def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any]=False ): '''simple docstring''' if class_cond: lowerCAmelCase__ : Tuple = self.dummy_cond_unet else: lowerCAmelCase__ : Dict = self.dummy_uncond_unet # Default to CM multistep sampler lowerCAmelCase__ : Optional[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : List[Any] = { 'unet': unet, 'scheduler': scheduler, } return components def _lowerCamelCase ( self : int , a : Optional[int] , a : Any=0 ): '''simple docstring''' if str(a ).startswith('mps' ): lowerCAmelCase__ : 
List[str] = torch.manual_seed(a ) else: lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a ) lowerCAmelCase__ : str = { 'batch_size': 1, 'num_inference_steps': None, 'timesteps': [22, 0], 'generator': generator, 'output_type': 'np', } return inputs def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : Optional[Any] = self.get_dummy_components() lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a ) lowerCAmelCase__ : Tuple = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : str = self.get_dummy_inputs(a ) lowerCAmelCase__ : str = pipe(**a ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : str = image[0, -3:, -3:, -1] lowerCAmelCase__ : Tuple = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : Tuple = self.get_dummy_components(class_cond=a ) lowerCAmelCase__ : Union[str, Any] = ConsistencyModelPipeline(**a ) lowerCAmelCase__ : Tuple = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a ) lowerCAmelCase__ : int = 0 lowerCAmelCase__ : Union[str, Any] = pipe(**a ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1] lowerCAmelCase__ : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : 
Union[str, Any] = self.get_dummy_components() lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(**a ) lowerCAmelCase__ : Dict = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(a ) lowerCAmelCase__ : Optional[Any] = 1 lowerCAmelCase__ : Dict = None lowerCAmelCase__ : List[Any] = pipe(**a ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1] lowerCAmelCase__ : Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : Optional[int] = self.get_dummy_components(class_cond=a ) lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a ) lowerCAmelCase__ : Optional[Any] = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a ) lowerCAmelCase__ : Dict = 1 lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ : str = pipe(**a ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Dict = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class A__ ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self : Optional[Any] , a : Tuple=0 , a : Optional[Any]=False , a : Optional[Any]="cpu" , a : Union[str, Any]=torch.floataa , a : Dict=(1, 3, 64, 64) ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a ) 
lowerCAmelCase__ : List[Any] = { 'num_inference_steps': None, 'timesteps': [22, 0], 'class_labels': 0, 'generator': generator, 'output_type': 'np', } if get_fixed_latents: lowerCAmelCase__ : Optional[int] = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a ) lowerCAmelCase__ : Tuple = latents return inputs def _lowerCamelCase ( self : str , a : Tuple=0 , a : Tuple="cpu" , a : Tuple=torch.floataa , a : str=(1, 3, 64, 64) ): '''simple docstring''' if type(a ) == str: lowerCAmelCase__ : str = torch.device(a ) lowerCAmelCase__ : List[str] = torch.Generator(device=a ).manual_seed(a ) lowerCAmelCase__ : Any = randn_tensor(a , generator=a , device=a , dtype=a ) return latents def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ : List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a ) pipe.to(torch_device=a ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Optional[Any] = self.get_inputs() lowerCAmelCase__ : Dict = pipe(**a ).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ : Any = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : Optional[int] = ConsistencyModelPipeline(unet=a , scheduler=a ) pipe.to(torch_device=a ) 
pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : List[str] = self.get_inputs() lowerCAmelCase__ : Union[str, Any] = 1 lowerCAmelCase__ : List[str] = None lowerCAmelCase__ : List[str] = pipe(**a ).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(unet=a , scheduler=a ) pipe.to(torch_device=a , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : str = self.get_inputs(get_fixed_latents=a , device=a ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ): lowerCAmelCase__ : Dict = pipe(**a ).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : str = image[0, -3:, -3:, -1] lowerCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) lowerCAmelCase__ : Dict = ConsistencyModelPipeline(unet=a , scheduler=a ) 
pipe.to(torch_device=a , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=a ) lowerCAmelCase__ : Any = self.get_inputs(get_fixed_latents=a , device=a ) lowerCAmelCase__ : List[str] = 1 lowerCAmelCase__ : str = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ): lowerCAmelCase__ : List[str] = pipe(**a ).images assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1] lowerCAmelCase__ : Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
69
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""XGLMTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""XGLMTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XGLMForCausalLM""", """XGLMModel""", """XGLMPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FlaxXGLMForCausalLM""", """FlaxXGLMModel""", """FlaxXGLMPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXGLMForCausalLM""", """TFXGLMModel""", """TFXGLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
707
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class A__ ( __magic_name__ ): def __init__( self : int , a : List[str] , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = params lowerCAmelCase__ : Union[str, Any] = np.array(a ) lowerCAmelCase__ : List[Any] = np.array([len(a ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : str , a : List[str] ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : Optional[int] ): '''simple docstring''' return len(self.lengths ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.params.max_model_input_size lowerCAmelCase__ : Optional[int] = self.lengths > max_len logger.info(f'''Splitting {sum(a )} too long sequences.''' ) def divide_chunks(a : List[str] , a : Tuple ): return [l[i : i + n] for i in range(0 , len(a ) , a )] lowerCAmelCase__ : Union[str, Any] = [] lowerCAmelCase__ : Union[str, Any] = [] if self.params.mlm: lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: lowerCAmelCase__ , lowerCAmelCase__ : int = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: lowerCAmelCase__ : Optional[int] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: lowerCAmelCase__ : Dict = np.insert(a , 0 , a ) if sub_s[-1] != 
sep_id: lowerCAmelCase__ : Dict = np.insert(a , len(a ) , a ) assert len(a ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(a ) new_tok_ids.extend(a ) new_lengths.extend([len(a ) for l in sub_seqs] ) lowerCAmelCase__ : str = np.array(a ) lowerCAmelCase__ : Optional[Any] = np.array(a ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = len(self ) lowerCAmelCase__ : List[Any] = self.lengths > 11 lowerCAmelCase__ : Dict = self.token_ids[indices] lowerCAmelCase__ : Tuple = self.lengths[indices] lowerCAmelCase__ : Any = len(self ) logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: lowerCAmelCase__ : int = self.params.special_tok_ids['unk_token'] lowerCAmelCase__ : str = len(self ) lowerCAmelCase__ : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) lowerCAmelCase__ : int = (unk_occs / self.lengths) < 0.5 lowerCAmelCase__ : List[str] = self.token_ids[indices] lowerCAmelCase__ : Optional[Any] = self.lengths[indices] lowerCAmelCase__ : Union[str, Any] = len(self ) logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' if not self.params.is_master: return logger.info(f'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def _lowerCamelCase ( self : int , a : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : 
Optional[Any] = [t[0] for t in batch] lowerCAmelCase__ : List[str] = [t[1] for t in batch] assert len(a ) == len(a ) # Max for paddings lowerCAmelCase__ : List[str] = max(a ) # Pad token ids if self.params.mlm: lowerCAmelCase__ : str = self.params.special_tok_ids['pad_token'] else: lowerCAmelCase__ : Optional[int] = self.params.special_tok_ids['unk_token'] lowerCAmelCase__ : Tuple = [list(t.astype(a ) ) + [pad_idx] * (max_seq_len_ - len(a )) for t in token_ids] assert len(tk_ ) == len(a ) assert all(len(a ) == max_seq_len_ for t in tk_ ) lowerCAmelCase__ : Union[str, Any] = torch.tensor(tk_ ) # (bs, max_seq_len_) lowerCAmelCase__ : List[str] = torch.tensor(a ) # (bs) return tk_t, lg_t
69
0
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class A__ ( __magic_name__ , unittest.TestCase ): lowercase = BarthezTokenizer lowercase = BarthezTokenizerFast lowercase = True lowercase = True def _lowerCamelCase ( self : Any ): '''simple docstring''' super().setUp() lowerCAmelCase__ : int = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=a ) lowerCAmelCase__ : int = tokenizer def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Any = '<pad>' lowerCAmelCase__ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(a ) , 101_122 ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101_122 ) @require_torch def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] lowerCAmelCase__ : List[str] = [0, 57, 3_018, 70_307, 91, 2] lowerCAmelCase__ : Any = self.tokenizer( a , max_length=len(a ) , padding=a , truncation=a , return_tensors='pt' ) self.assertIsInstance(a , a ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , 
batch.attention_mask.shape ) lowerCAmelCase__ : int = batch.input_ids.tolist()[0] self.assertListEqual(a , a ) def _lowerCamelCase ( self : str ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ : Optional[int] = self.get_tokenizer() lowerCAmelCase__ : List[Any] = self.get_rust_tokenizer() lowerCAmelCase__ : int = 'I was born in 92000, and this is falsé.' lowerCAmelCase__ : List[str] = tokenizer.tokenize(a ) lowerCAmelCase__ : List[Any] = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) lowerCAmelCase__ : List[Any] = tokenizer.encode(a , add_special_tokens=a ) lowerCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) lowerCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() lowerCAmelCase__ : Optional[int] = tokenizer.encode(a ) lowerCAmelCase__ : List[Any] = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) @slow def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : int = {'input_ids': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCAmelCase__ : int = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=a , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=a , )
708
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, 
is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
69
0
from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput lowerCamelCase__ = 8 def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=BITS ) -> Dict: lowerCAmelCase__ : Tuple = x.device lowerCAmelCase__ : int = (x * 255).int().clamp(0 , 255 ) lowerCAmelCase__ : int = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : List[Any] = rearrange(SCREAMING_SNAKE_CASE_ , 'd -> d 1 1' ) lowerCAmelCase__ : int = rearrange(SCREAMING_SNAKE_CASE_ , 'b c h w -> b c 1 h w' ) lowerCAmelCase__ : List[Any] = ((x & mask) != 0).float() lowerCAmelCase__ : Any = rearrange(SCREAMING_SNAKE_CASE_ , 'b c d h w -> b (c d) h w' ) lowerCAmelCase__ : str = bits * 2 - 1 return bits def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=BITS ) -> Union[str, Any]: lowerCAmelCase__ : int = x.device lowerCAmelCase__ : Any = (x > 0).int() lowerCAmelCase__ : str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE_ , dtype=torch.intaa ) lowerCAmelCase__ : int = rearrange(SCREAMING_SNAKE_CASE_ , 'd -> d 1 1' ) lowerCAmelCase__ : List[str] = rearrange(SCREAMING_SNAKE_CASE_ , 'b (c d) h w -> b c d h w' , d=8 ) lowerCAmelCase__ : Dict = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def lowerCAmelCase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See 
formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) lowerCAmelCase__ : Optional[int] = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas lowerCAmelCase__ : Union[str, Any] = self.alphas_cumprod[timestep] lowerCAmelCase__ : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod lowerCAmelCase__ : int = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowerCAmelCase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" lowerCAmelCase__ : str = self.bit_scale if self.config.clip_sample: lowerCAmelCase__ : int = torch.clamp(SCREAMING_SNAKE_CASE_ , -scale , SCREAMING_SNAKE_CASE_ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) lowerCAmelCase__ : List[Any] = self._get_variance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : str = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide lowerCAmelCase__ : int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowerCAmelCase__ : Union[str, Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowerCAmelCase__ : str = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 lowerCAmelCase__ : Union[str, Any] = model_output.device if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else 'cpu' lowerCAmelCase__ : str = torch.randn(model_output.shape , dtype=model_output.dtype , generator=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Union[str, Any] = self._get_variance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ** 0.5 * eta * noise lowerCAmelCase__ : int = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="epsilon" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ) -> Union[DDPMSchedulerOutput, Tuple]: lowerCAmelCase__ : Optional[int] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: lowerCAmelCase__ : Union[str, Any] = torch.split(SCREAMING_SNAKE_CASE_ , sample.shape[1] , dim=1 ) else: lowerCAmelCase__ : str = None # 1. compute alphas, betas lowerCAmelCase__ : str = self.alphas_cumprod[t] lowerCAmelCase__ : Optional[Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one lowerCAmelCase__ : Dict = 1 - alpha_prod_t lowerCAmelCase__ : Optional[Any] = 1 - alpha_prod_t_prev # 2. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": lowerCAmelCase__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": lowerCAmelCase__ : str = model_output else: raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' ) # 3. Clip "predicted x_0" lowerCAmelCase__ : Any = self.bit_scale if self.config.clip_sample: lowerCAmelCase__ : str = torch.clamp(SCREAMING_SNAKE_CASE_ , -scale , SCREAMING_SNAKE_CASE_ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t lowerCAmelCase__ : int = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowerCAmelCase__ : Dict = 0 if t > 0: lowerCAmelCase__ : Dict = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=SCREAMING_SNAKE_CASE_ ).to(model_output.device ) lowerCAmelCase__ : List[str] = (self._get_variance(SCREAMING_SNAKE_CASE_ , predicted_variance=SCREAMING_SNAKE_CASE_ ) ** 0.5) * noise lowerCAmelCase__ : Tuple = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) class A__ ( __magic_name__ ): def __init__( self : str , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : Optional[float] = 1.0 , ): '''simple docstring''' super().__init__() lowerCAmelCase__ : Union[str, Any] = bit_scale lowerCAmelCase__ : Tuple = ( ddim_bit_scheduler_step if isinstance(a , a ) else ddpm_bit_scheduler_step ) self.register_modules(unet=a , scheduler=a ) @torch.no_grad() def __call__( self : List[Any] , a : Optional[int] = 256 , a : Optional[int] = 256 , a : Optional[int] = 50 , a : Optional[torch.Generator] = None , a : Optional[int] = 1 , a : Optional[str] = "pil" , a : bool = True , **a : List[Any] , ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=a , ) lowerCAmelCase__ : Optional[int] = decimal_to_bits(a ) * self.bit_scale lowerCAmelCase__ : Optional[Any] = latents.to(self.device ) self.scheduler.set_timesteps(a ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual lowerCAmelCase__ : int = self.unet(a , a ).sample # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase__ : Dict = self.scheduler.step(a , a , a ).prev_sample lowerCAmelCase__ : Tuple = bits_to_decimal(a ) if output_type == "pil": lowerCAmelCase__ : List[Any] = self.numpy_to_pil(a ) if not return_dict: return (image,) return ImagePipelineOutput(images=a )
709
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ = { """vocab_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""", }, """tokenizer_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""", }, } lowerCamelCase__ = { """google/rembert""": 256, } lowerCamelCase__ = """▁""" class A__ ( __magic_name__ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = RemBertTokenizer def __init__( self : Optional[Any] , a : str=None , a : Any=None , a : List[Any]=True , a : str=True , a : Dict=False , a : Dict="[CLS]" , a : int="[SEP]" , a : Tuple="<unk>" , a : Optional[Any]="[SEP]" , a : Tuple="<pad>" , a : Dict="[CLS]" , a : Optional[Any]="[MASK]" , **a : str , ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , ) lowerCAmelCase__ : int = do_lower_case lowerCAmelCase__ : int = remove_space lowerCAmelCase__ : List[Any] = keep_accents lowerCAmelCase__ : Optional[Any] = vocab_file lowerCAmelCase__ : Union[str, Any] = False if not self.vocab_file else True def _lowerCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ): '''simple 
docstring''' lowerCAmelCase__ : Dict = [self.sep_token_id] lowerCAmelCase__ : Any = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1] return [1] + ([0] * len(a )) + [1] def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ): '''simple docstring''' lowerCAmelCase__ : Tuple = [self.sep_token_id] lowerCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCamelCase ( self : Tuple , a : str , a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(a ): logger.error('Vocabulary path ({}) should be a directory'.format(a ) ) return lowerCAmelCase__ : int = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ): copyfile(self.vocab_file , a ) return (out_vocab_file,)
69
0
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ): '''simple docstring''' lowerCAmelCase__ : str = parent lowerCAmelCase__ : Union[str, Any] = batch_size lowerCAmelCase__ : List[str] = image_size lowerCAmelCase__ : Optional[Any] = patch_size lowerCAmelCase__ : Tuple = num_channels lowerCAmelCase__ : Optional[int] = embed_dim lowerCAmelCase__ : Tuple = depths lowerCAmelCase__ : List[str] = num_heads lowerCAmelCase__ : List[Any] = window_size lowerCAmelCase__ : Any = mlp_ratio lowerCAmelCase__ : Optional[Any] = qkv_bias lowerCAmelCase__ : Any = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : int = drop_path_rate lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : int = 
use_absolute_embeddings lowerCAmelCase__ : List[str] = patch_norm lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = initializer_range lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : List[Any] = scope lowerCAmelCase__ : Dict = use_labels lowerCAmelCase__ : List[Any] = type_sequence_label_size lowerCAmelCase__ : Optional[Any] = encoder_stride def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : Optional[Any] = None if self.use_labels: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : int = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self : List[str] ): '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = model(a ) lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ): '''simple docstring''' lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : str = model(a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a ) model.to(a ) model.eval() lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ : List[str] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ): '''simple docstring''' lowerCAmelCase__ : str = self.type_sequence_label_size lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a ) model.to(a ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs() lowerCAmelCase__ : Optional[int] = config_and_inputs lowerCAmelCase__ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) lowercase = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = SwinvaModelTester(self ) 
lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' ) def _lowerCamelCase ( self : Any ): '''simple docstring''' pass @unittest.skip(reason='Swinv2 does not use inputs_embeds' ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' pass def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Optional[int] = model_class(a ) lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Tuple = [*signature.parameters.keys()] lowerCAmelCase__ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' 
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : str = False lowerCAmelCase__ : List[Any] = True lowerCAmelCase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Dict = outputs.attentions lowerCAmelCase__ : Dict = len(self.model_tester.depths ) self.assertEqual(len(a ) , a ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : Optional[int] = config.window_size**2 lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) lowerCAmelCase__ : Tuple = len(a ) # Check attention is always last and order is fine lowerCAmelCase__ : str = True lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) if hasattr(self.model_tester , 'num_hidden_states_types' ): lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCAmelCase__ : Any = 2 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowerCAmelCase__ : Dict = outputs.attentions self.assertEqual(len(a ) , a ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def _lowerCamelCase ( self : int , a : Optional[int] , a : 
int , a : Optional[Any] , a : List[Any] ): '''simple docstring''' lowerCAmelCase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowerCAmelCase__ : Optional[Any] = outputs.hidden_states lowerCAmelCase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swinv2 has a different seq_length lowerCAmelCase__ : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(a ) , a ) lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape lowerCAmelCase__ : List[str] = ( reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Dict = 
self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Any = 3 lowerCAmelCase__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase__ : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase__ : str = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : Any = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a ) self.assertIsNotNone(a ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[int] = _config_zero_init(a ) for model_class in self.all_model_classes: lowerCAmelCase__ : int = model_class(config=a ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and 
param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class A__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to( a ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**a ) # verify the logits lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
710
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class A__(AbstractDatasetInputStream):
    """Dataset input stream that builds a dataset from a Python generator callable.

    Wraps the packaged ``Generator`` builder so a user-supplied generator function
    can be materialized either as a streaming dataset or as a regular (map-style)
    dataset cached on disk.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """Create the input stream.

        Args:
            generator: Callable yielding the dataset examples.
            features: Optional ``Features`` schema for the generated examples.
            cache_dir: Directory where the prepared dataset is cached.
            keep_in_memory: Whether to load the prepared dataset into memory.
            streaming: If True, ``read`` returns a streaming (iterable) dataset.
            gen_kwargs: Keyword arguments forwarded to ``generator``.
            num_proc: Number of processes used by ``download_and_prepare``.
            **kwargs: Extra keyword arguments forwarded to the builder.
        """
        # NOTE: the original obfuscated version bound every parameter to the same
        # name `a` (a SyntaxError) and dropped the builder into a local variable;
        # it must live on `self.builder` because the read method uses it.
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def _lowerCamelCase(self):
        """Build and return the dataset (streaming or map-style) from the generator."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            # Explicit defaults: the builder resolves its own download/verification
            # behavior when these are None.
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
69
0